Dataset schema (one row per sample):

| column | type | values |
|---|---|---|
| code | string | lengths 82 – 53.2k |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | lengths 91 – 41.9k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
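

# Illustrative sketch (not part of the original test module): how the helpers
# under test fit together, using only inputs covered by the cases above.
if __name__ == "__main__":
    gen_kwargs = {"shards": [0, 1, 2, 3]}
    print(_number_of_shards_in_gen_kwargs(gen_kwargs))    # 4
    print(_split_gen_kwargs(gen_kwargs, max_num_jobs=2))  # [{'shards': [0, 1]}, {'shards': [2, 3]}]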
# ---------------------------------------------------------------------------
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve for whichever of the three quantities was passed in as zero."""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return ("stress", tangential_force / area)
    elif tangential_force == 0:
        return ("tangential_force", stress * area)
    else:
        return ("area", tangential_force / stress)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
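    # Illustrative run (values assumed): solve for the quantity passed as zero.
    print(shear_stress(stress=25, tangential_force=100, area=0))  # ('area', 4.0)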
# ---------------------------------------------------------------------------
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
# ---------------------------------------------------------------------------

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
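
# Illustrative usage (sketch): `attribute_map` above aliases the generic config
# names onto the decoder-specific ones, e.g.:
#   config = Speech2Text2Config()
#   config.hidden_size == config.d_model                          # True
#   config.num_attention_heads == config.decoder_attention_heads  # True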
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
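
# Note (illustrative): with the structure above, importing the package module is
# cheap; heavy submodules such as `modeling_layoutlmv2` are only imported the
# first time one of their attributes (e.g. `LayoutLMv2Model`) is accessed on the
# lazy module.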
# ---------------------------------------------------------------------------
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state space tree, printing each complete permutation (leaf)."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
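
# Each call prints n! lines: 24 permutations for [3, 1, 2, 4] and 6 for
# ["A", "B", "C"], starting with [3, 1, 2, 4] and ['A', 'B', 'C'] respectively.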
# ---------------------------------------------------------------------------
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    """Wraps an auto image processor and an auto tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
# ---------------------------------------------------------------------------
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS with pruning: stop when the path overshoots or can no longer reach max_sum."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
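
# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 this prints the two qualifying
# subsets: [3, 4, 2] [4, 5]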
# ---------------------------------------------------------------------------
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
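

# Note: the routine above swaps two uniformly random positions per step, which is
# not the textbook Fisher-Yates and does not yield a perfectly uniform permutation.
# A minimal sketch (illustrative addition) of the classic in-place variant:
def classic_fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick a position in data[0..i]
        data[i], data[j] = data[j], data[i]
    return data
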
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)


# ---------------------------------------------------------------------------
def interpolation_search(sorted_collection, item):
    """Locate item in an ascending sorted list by interpolating the probe index."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
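
# Worked probe example (illustrative) for collection = [10, 30, 40, 45, 50, 66, 77, 93]
# and item = 45:
#   point = 0 + (45 - 10) * (7 - 0) // (93 - 10) = 245 // 83 = 2   -> 40 < 45, so left = 3
#   point = 3 + (45 - 45) * (7 - 3) // (93 - 45) = 3               -> found at index 3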
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
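
# Illustrative usage (sketch; `CvtModel` assumed available in the same library):
#   from transformers import CvtModel
#   config = CvtConfig(depth=[1, 2, 10], embed_dim=[64, 192, 384])
#   model = CvtModel(config)  # randomly initialised three-stage hierarchy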
# ---------------------------------------------------------------------------
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` by the Newton-Raphson method, starting from `starting_point`."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
# ---------------------------------------------------------------------------

import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
# ---------------------------------------------------------------------------
def text_justification(word: str, max_width: int) -> list:
    """Fully justify the given text: pad each line with spaces so it is max_width wide."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_mod = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_mod):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
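

# Worked example (illustrative, LeetCode-style input assumed):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']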
if __name__ == "__main__":
    from doctest import testmod

    testmod()


# ---------------------------------------------------------------------------
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()
results = {}
# fmt: off
# Each tensor below is the expected slice of the first 30 output logits for one of the
# checkpoints checked in the loop at the bottom; `results` is keyed by the model id with
# "/" and "-" replaced by "_". The key-to-tensor mapping is not recoverable from this
# copy, so the slices are kept as numbered variables.
expected_slice_1 = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
expected_slice_2 = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
expected_slice_3 = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
expected_slice_4 = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
expected_slice_5 = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
expected_slice_6 = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
expected_slice_7 = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
expected_slice_8 = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
expected_slice_9 = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
expected_slice_10 = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
expected_slice_11 = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
expected_slice_12 = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
expected_slice_13 = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
expected_slice_14 = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
expected_slice_15 = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on

models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        # `results` must map a key derived from the model id ("/" and "-" -> "_",
        # e.g. "google_ddpm_celebahq_256") to the matching expected slice above.
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")


# ---------------------------------------------------------------------------
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMImg2ImgPipeline(DiffusionPipeline):
    """Noises an input image up to a strength-dependent timestep, then denoises it with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
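
# Hedged usage sketch (checkpoint name and variables assumed): `strength` controls
# how far the input is noised before DDIM denoising brings it back.
#   pipe = DDIMImg2ImgPipeline.from_pretrained("google/ddpm-celebahq-256")
#   images, t = pipe(image=pil_image, strength=0.5, num_inference_steps=50, return_dict=False)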
# ---------------------------------------------------------------------------
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='auto')
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map='auto')
    def tearDown(self):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
    def test_correct_head_class(self):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
    def setUp(self):
super().setUp()
    def tearDown(self):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
    def test_pipeline(self):
        self.pipe = pipeline(
            'text-generation', model=self.model_name, model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
    def setUp(self):
super().setUp()
    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='balanced')
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class _lowercase ( __lowercase ):
    def setUp(self):
        self.model_name = 'facebook/opt-350m'
super().setUp()
    def test_training(self):
        if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ', return_tensors='pt').to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowercase ( __lowercase ):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 56 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotForConditionalGeneration,
            'feature-extraction': TFBlenderbotModel,
            'summarization': TFBlenderbotForConditionalGeneration,
            'text2text-generation': TFBlenderbotForConditionalGeneration,
            'translation': TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ['My friends are cool but they eat too many carbs.']
    model_name = 'facebook/blenderbot-400M-distill'

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='tf')
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 721 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase: Any = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Tuple = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Optional[Any] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Optional[Any] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Optional[Any] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_lowercase: Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 225 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = 'question'
    context_column: str = 'context'
    answers_column: str = 'answers'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: 'question', self.context_column: 'context', self.answers_column: 'answers'}
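

# Minimal usage sketch (illustrative): the template can be instantiated with
# custom column names, and `column_mapping` reports how they map back onto the
# canonical "question"/"context"/"answers" schema.
if __name__ == '__main__':
    task = QuestionAnsweringExtractive(question_column='q', context_column='ctx', answers_column='ans')
    assert task.task == 'question-answering-extractive'
    assert task.column_mapping == {'q': 'question', 'ctx': 'context', 'ans': 'answers'}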
| 666 |
from collections.abc import Sequence
def max_subsequence_sum(nums: "Sequence[int] | None" = None) -> int:
    """Return the maximum sum over all (possibly non-contiguous) subsequences of `nums`."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # keep the best so far, extend it with the current element, or restart from it
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
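    # A few extra sanity checks (illustrative): the recurrence takes every positive
    # element when one exists, and falls back to the best single element otherwise.
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
    assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1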
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : Tuple = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['ConvNextFeatureExtractor']
_SCREAMING_SNAKE_CASE : Dict = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 710 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'data2vec-text'

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 206 | 0 |
def longest_common_subsequence(x: str, y: str):
    """Return the length of an LCS of `x` and `y`, and one such subsequence."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ''
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'
    ln, subseq = longest_common_subsequence(a, b)
    print('len =', ln, ', sub-sequence =', subseq)
    assert expected_ln == ln
    assert expected_subseq == subseq
import doctest
doctest.testmod()
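    # Additional illustrative checks: the classic CLRS pair has an LCS of length 4,
    # and an empty second string yields an empty subsequence.
    lcs_len, _ = longest_common_subsequence('ABCBDAB', 'BDCAB')
    assert lcs_len == 4
    assert longest_common_subsequence('AGGTAB', '') == (0, '')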
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert an (alpha, sigma) pair to a continuous timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
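

# Illustrative sanity check: t=0 gives (alpha, sigma) = (1, 0) and atan2(0, 1) = 0,
# while t=1 gives (0, 1) and atan2(1, 0) = pi/2, so `get_crash_schedule` maps the
# endpoints of [0, 1] back onto themselves, e.g.
#   get_crash_schedule(torch.tensor([0.0, 1.0])) -> tensor([0., 1.])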
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]['url']
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}')
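

# Illustrative expected behavior (derived from RES_CONV_MAP / ATTN_MAP above):
#   convert_resconv_naming("skip.weight")   -> "conv_skip.weight"
#   convert_resconv_naming("main.0.weight") -> "conv_1.weight"
#   convert_attn_naming("qkv_proj.weight")  -> ["query.weight", "key.weight", "value.weight"]
#   convert_attn_naming("norm.weight")      -> "group_norm.weight"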
def rename(input_string, max_depth=13):
    string = input_string
    if string.split('.')[0] == 'timestep_embed':
        return string.replace('timestep_embed', 'time_proj')
    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]
    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]
    if string.startswith('main.'):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else 'down_blocks.0'
    if not string_left.startswith('.'):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.')
    string_left = string_left[1:]
    if 'resnets' in new_layer:
        new_string_left = convert_resconv_naming(string_left)
    elif 'attentions' in new_layer:
        new_string_left = convert_attn_naming(string_left)
    string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
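

# Example (illustrative): a fused qkv Conv1d weight of shape (3*C, C, 1) arriving
# with three target keys ["...query", "...key", "...value"] is split into three
# (C, C) Linear weights; 1-D bias vectors of shape (3*C,) are split the same way
# without the trailing conv-kernel squeeze.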
def main(args):
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature type: reads image data from bytes or a path into `PIL.Image.Image` objects."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = 'PIL.Image.Image'
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
def __call__( self : List[str] ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {'path': value, 'bytes': None}
        elif isinstance(value, bytes):
            return {'path': None, 'bytes': value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
if pa.types.is_string(storage.type ):
a__ = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.binary() )
a__ = pa.StructArray.from_arrays([bytes_array, storage] ,["bytes", "path"] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
a__ = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
a__ = pa.StructArray.from_arrays([storage, path_array] ,["bytes", "path"] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
a__ = storage.field("bytes" )
else:
a__ = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
a__ = storage.field("path" )
else:
a__ = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
a__ = pa.StructArray.from_arrays([bytes_array, path_array] ,["bytes", "path"] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
a__ = pa.array(
[encode_np_array(np.array(lowerCamelCase_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
a__ = pa.array([None] * len(lowerCamelCase_ ) ,type=pa.string() )
a__ = pa.StructArray.from_arrays(
[bytes_array, path_array] ,["bytes", "path"] ,mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_ ,self.pa_type )
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(a__ : Tuple ):
with xopen(lowerCamelCase_ ,"rb" ) as f:
a__ = f.read()
return bytes_
a__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
a__ = pa.array(
[os.path.basename(lowerCamelCase_ ) if path is not None else None for path in storage.field("path" ).to_pylist()] ,type=pa.string() ,)
a__ = pa.StructArray.from_arrays([bytes_array, path_array] ,["bytes", "path"] ,mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_ ,self.pa_type )
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, 'filename') and image.filename != '':
        return {'path': image.filename, 'bytes': None}
    else:
        return {'path': None, 'bytes': image_to_bytes(image)}
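

# Quick round-trip sketch (illustrative, requires Pillow): an in-memory RGB image
# created with `PIL.Image.new` has no native `.format`, so `image_to_bytes` falls
# back to PNG; `encode_pil_image` always returns a {"path", "bytes"} dict.
if __name__ == '__main__':
    import PIL.Image

    img = PIL.Image.new('RGB', (4, 4), color=(255, 0, 0))
    data = image_to_bytes(img)
    assert PIL.Image.open(BytesIO(data)).format == 'PNG'
    assert sorted(encode_pil_image(img)) == ['bytes', 'path']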
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ['u', 'i']:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.')
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}')
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {'path': None, 'bytes': image_to_bytes(image)}
def objs_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{'path': obj, 'bytes': None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 713 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.', '')
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1', 'attention.output.LayerNorm')
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2', 'output.LayerNorm')
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm', 'LayerNorm')
    if "transformer" in orig_key:
        layer_num = orig_key.split('.')[0].split('_')[-1]
        orig_key = orig_key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}')
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn', 'attention.self')
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha', 'attention')
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q', 'self.query')
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k', 'self.key')
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v', 'self.value')
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1', 'intermediate.dense')
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2', 'output.dense')
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff', 'output.dense')
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class', 'cls.predictions.decoder')
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm', 'cls.predictions.transform')
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
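

# Illustrative example of the full chain:
#   rename_key("model.transformer_0.mha.W_q.weight")
#     -> "yoso.encoder.layer.0.attention.self.query.weight"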
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}')
if __name__ == "__main__":
UpperCamelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ : Any = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 394 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step."""

    sample: torch.FloatTensor
class UpperCAmelCase ( nn.Module ):
def __init__( self : Any , __magic_name__ : List[Any]=3 , __magic_name__ : int=3 , __magic_name__ : int=("DownEncoderBlock2D",) , __magic_name__ : int=(6_4,) , __magic_name__ : Optional[Any]=2 , __magic_name__ : List[Any]=3_2 , __magic_name__ : Tuple="silu" , __magic_name__ : Union[str, Any]=True , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = torch.nn.Convad(
SCREAMING_SNAKE_CASE__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
# down
UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase = output_channel
UpperCamelCase = block_out_channels[i]
UpperCamelCase = i == len(SCREAMING_SNAKE_CASE__ ) - 1
UpperCamelCase = get_down_block(
SCREAMING_SNAKE_CASE__ , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , resnet_groups=SCREAMING_SNAKE_CASE__ , attention_head_dim=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , )
self.down_blocks.append(SCREAMING_SNAKE_CASE__ )
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , )
# out
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE__ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = 2 * out_channels if double_z else out_channels
UpperCamelCase = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE__ , 3 , padding=1 )
UpperCamelCase = False
def lowerCamelCase_ ( self : Any , __magic_name__ : Tuple ):
"""simple docstring"""
UpperCamelCase = x
UpperCamelCase = self.conv_in(SCREAMING_SNAKE_CASE__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__magic_name__ : List[str] ):
def custom_forward(*__magic_name__ : int ):
return module(*SCREAMING_SNAKE_CASE__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
else:
for down_block in self.down_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ )
else:
# down
for down_block in self.down_blocks:
UpperCamelCase = down_block(SCREAMING_SNAKE_CASE__ )
# middle
UpperCamelCase = self.mid_block(SCREAMING_SNAKE_CASE__ )
# post-process
UpperCamelCase = self.conv_norm_out(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.conv_act(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.conv_out(SCREAMING_SNAKE_CASE__ )
return sample
class UpperCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=3 , __magic_name__ : str=3 , __magic_name__ : Tuple=("UpDecoderBlock2D",) , __magic_name__ : Optional[int]=(6_4,) , __magic_name__ : str=2 , __magic_name__ : Tuple=3_2 , __magic_name__ : List[Any]="silu" , __magic_name__ : Union[str, Any]="group" , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = layers_per_block
UpperCamelCase = nn.Convad(
SCREAMING_SNAKE_CASE__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase = None
UpperCamelCase = nn.ModuleList([] )
UpperCamelCase = in_channels if norm_type == """spatial""" else None
# mid
UpperCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , )
# up
UpperCamelCase = list(reversed(SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase = output_channel
UpperCamelCase = reversed_block_out_channels[i]
UpperCamelCase = i == len(SCREAMING_SNAKE_CASE__ ) - 1
UpperCamelCase = get_up_block(
SCREAMING_SNAKE_CASE__ , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , prev_output_channel=SCREAMING_SNAKE_CASE__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , resnet_groups=SCREAMING_SNAKE_CASE__ , attention_head_dim=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , resnet_time_scale_shift=SCREAMING_SNAKE_CASE__ , )
self.up_blocks.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = output_channel
# out
if norm_type == "spatial":
UpperCamelCase = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE__ , eps=1e-6 )
UpperCamelCase = nn.SiLU()
UpperCamelCase = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE__ , 3 , padding=1 )
UpperCamelCase = False
def lowerCamelCase_ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase = z
UpperCamelCase = self.conv_in(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__magic_name__ : Optional[int] ):
def custom_forward(*__magic_name__ : List[str] ):
return module(*SCREAMING_SNAKE_CASE__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
else:
# middle
UpperCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
UpperCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# middle
UpperCamelCase = self.mid_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
UpperCamelCase = up_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# post-process
if latent_embeds is None:
UpperCamelCase = self.conv_norm_out(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase = self.conv_norm_out(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.conv_act(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.conv_out(SCREAMING_SNAKE_CASE__ )
return sample
class UpperCAmelCase ( nn.Module ):
def __init__( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[Any]=None , __magic_name__ : str="random" , __magic_name__ : Union[str, Any]=False , __magic_name__ : Union[str, Any]=True ):
"""simple docstring"""
super().__init__()
UpperCamelCase = n_e
UpperCamelCase = vq_embed_dim
UpperCamelCase = beta
UpperCamelCase = legacy
UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
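    # Note on the straight-through estimator in `forward` above (explanatory,
    # not in the original source): z + (z_q - z).detach() evaluates to z_q in
    # the forward pass, while its gradient with respect to z is the identity,
    # so encoder gradients flow "through" the non-differentiable argmin
    # codebook lookup.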
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
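# Usage sketch (illustrative only; the toy shapes below are assumptions, not
# part of the original module): `parameters` is split into [mean | logvar]
# along dim=1, sampling uses the reparameterization mean + std * eps, and
# kl() reduces over dims [1, 2, 3].
if __name__ == "__main__":
    _params = torch.randn(2, 8, 4, 4)
    _dist = DiagonalGaussianDistribution(_params)
    print(_dist.sample().shape)  # torch.Size([2, 4, 4, 4])
    print(_dist.kl().shape)      # torch.Size([2])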
| 386 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
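# Usage sketch (illustrative, not part of the original file): for any task
# other than "multiple-choice", the `inputs` property above yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})]),
# which tells an ONNX exporter to treat batch and sequence as dynamic axes.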
| 282 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
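# Note (explanatory, not part of the original file): `_LazyModule` registers
# the `_import_structure` mapping so that a symbol such as
# `BlenderbotForConditionalGeneration` is only imported from
# `modeling_blenderbot` on first attribute access; the try/except blocks
# above simply drop the torch/tf/flax entries when that backend is missing.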
| 545 |
"""simple docstring"""
UpperCAmelCase__ : Dict = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
while place < len(_snake_case ):
if (place + 1 < len(_snake_case )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for arabic, roman in ROMAN:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[Any] = divmod(_snake_case ,_snake_case )
result.append(roman * factor )
if number == 0:
break
return "".join(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
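# Round-trip sketch (illustrative): the two converters above are inverses on
# canonical numerals, e.g. int_to_roman(3549) == "MMMDXLIX" and
# roman_to_int("MMMDXLIX") == 3549.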
| 545 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__a :int = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 86 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
    args = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
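# Worked micro-example of the counting above (toy token ids, illustrative):
# data = [[5, 5, 7], [7, 9]] gives Counter({5: 2, 7: 2, 9: 1}), so after the
# scatter loop counts[5] == 2, counts[7] == 2, counts[9] == 1 and every other
# vocabulary id keeps its default count of 0.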
| 563 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
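# Micro-example of the `rename_key` helper above (toy state dict, illustrative):
#   sd = {"backbone.patch_embed.norm.weight": w}
#   rename_key(sd, "backbone.patch_embed.norm.weight",
#              "model.pixel_level_module.encoder.model.embeddings.norm.weight")
# pops the old key and re-inserts the same tensor under the new key.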
| 712 |
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # note: this variant applies len(data) random transpositions (two random
    # indices swapped per pass) rather than the classic backward walk
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
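
# For comparison, a sketch of the classic unbiased Fisher-Yates walk (not part
# of the original file): fix positions from the end, swapping each with a
# uniformly chosen earlier (or same) position.
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data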
| 590 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
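# Walk-through of `convert_tokens_to_string` above (illustrative): special
# tokens are emitted verbatim while runs of sentencepiece pieces between them
# are decoded together, e.g. ["▁he", "llo", "<::::>", "▁world"] decodes the
# first two pieces as one chunk, appends "<::::>", then decodes the last piece.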
| 484 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
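# Micro-example for `upgrade_state_dict` above (toy keys, illustrative):
# "image_encoder.embeddings.pos" is skipped entirely, while
# "image_encoder.module.patch.weight" is renamed to
# "flava.image_model.patch.weight" with its tensor cast to float32.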
| 484 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
def A ( self : str ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(lowercase )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : str ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : str ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ = torch.from_numpy(lowercase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ = pt_noise
super().check_pt_tf_models(lowercase , lowercase , lowercase )
def A ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(lowercase )
model.to(lowercase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(lowercase , lowercase ) )
UpperCamelCase__ = outputs[0].cpu().numpy()
UpperCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
UpperCamelCase__ = model_class.from_pretrained(lowercase )
model.to(lowercase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(lowercase , lowercase ) )
# Make sure we don't have nans
UpperCamelCase__ = after_outputs[0].cpu().numpy()
UpperCamelCase__ = 0
UpperCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def A ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A ( self : int ) -> Tuple:
'''simple docstring'''
pass
@slow
def A ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = ViTMAEModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowercase )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ = ViTMAEConfig()
UpperCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**lowercase , noise=torch.from_numpy(lowercase ).to(device=lowercase ) )
# verify the logits
UpperCamelCase__ = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , lowercase )
UpperCamelCase__ = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase ) , atol=1e-4 ) )
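# Arithmetic check of the masked sequence length used by the tester above
# (default values): image_size=30, patch_size=2 gives (30 // 2) ** 2 = 225
# patches, and mask_ratio=0.6 keeps ceil(0.4 * (225 + 1)) = 91 visible tokens
# (the +1 accounts for the [CLS] token).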
| 265 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
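# Worked example of the `size_divisor` rounding in `resize` above
# (illustrative numbers): a 522 x 333 image with size_divisor=32 becomes
# 522 // 32 * 32 = 512 by 333 // 32 * 32 = 320, i.e. both sides are floored
# to the nearest multiple of 32 so later feature maps divide evenly.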
| 265 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 162 |
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
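

# Minimal usage sketch (array values are illustrative, not from the original
# module): prefix sums and a point update on [1, 2, 3, 4].
def _fenwick_demo() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4])
    assert tree.prefix(4) == 10  # 1 + 2 + 3 + 4
    tree.add(2, 5)  # underlying array becomes [1, 2, 8, 4]
    assert tree.query(1, 3) == 10  # 2 + 8
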
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 162 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
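

# Illustrative round-trip check (the key and plaintext are assumptions for
# demonstration, not Project Euler data): XOR is its own inverse, so encrypting
# with a key and decoding via `try_key` recovers the original text.
def _xor_roundtrip_demo() -> None:
    plaintext = "hello world"
    key = (ord("a"), ord("b"), ord("c"))
    ciphertext = [ord(char) ^ keychar for char, keychar in zip(plaintext, cycle(key))]
    assert try_key(ciphertext, key) == plaintext
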
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
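

# Minimal usage sketch (illustrative only; not executed here because this module
# relies on relative imports inside the `transformers` package):
#
#   config = DetrConfig()
#   config.hidden_size            # 256, aliased to `d_model` via `attribute_map`
#   config.num_attention_heads    # 8, aliased to `encoder_attention_heads`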
| 545 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 284 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
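
    # Example invocation (illustrative; the output path is an assumption):
    #   python convert_swin2sr_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64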
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
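

# Minimal usage sketch (values are illustrative): 2x2 determinant and product.
def _matrix_demo() -> None:
    m = Matrix([[1, 2], [3, 4]])
    assert m.determinant() == -2  # 1*4 - 2*3
    assert (m * m).rows == [[7, 10], [15, 22]]
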
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 549 | 0 |
"""simple docstring"""
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
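

# Worked example (numbers are illustrative): borrowing 25,000 at 12% annual
# interest over 3 years gives rate_per_month = 0.01 and 36 payments, so the
# monthly installment is 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~= 830.36:
#   equated_monthly_installments(25000, 0.12, 3)  # ~830.3577
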
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 95 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
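

# Quick sanity sketch (illustrative only; not executed here because this module
# relies on relative imports): with the default embed_dim=96 and four stages,
# the derived channel width after the last stage is 96 * 2 ** 3 == 768.
#
#   config = MaskFormerSwinConfig()
#   config.hidden_size  # 768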
| 399 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
| 564 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
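

# Minimal usage sketch (node values are illustrative): a valid BST and a tree
# that violates the ordering invariant.
def _bst_demo() -> None:
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)
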
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'

    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 354 | 1 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
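

# Minimal usage sketch (input values are illustrative): slowsort works in place.
def _slowsort_demo() -> None:
    data = [6, 2, 9, 4]
    slowsort(data)
    assert data == [2, 4, 6, 9]
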
if __name__ == "__main__":
from doctest import testmod
testmod()
| 625 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_A : str = LEDTokenizer
_A : List[Any] = LEDTokenizerFast
_A : Dict = True
def lowerCamelCase(self ):
super().setUp()
A_ : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A_ : Any = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ : List[str] = {"""unk_token""": """<unk>"""}
A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , lowerCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase(self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase(self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
    @require_torch
    def test_prepare_batch(self ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )

    @require_torch
    def test_prepare_batch_empty_target_text(self ):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , batch )
            self.assertIn("""attention_mask""" , batch )
            self.assertNotIn("""labels""" , batch )
            self.assertNotIn("""decoder_attention_mask""" , batch )

    @require_torch
    def test_targets_max_length(self ):
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def test_outputs_not_longer_than_maxlen(self ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=True , truncation=True , return_tensors="""pt""" )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def test_special_tokens(self ):
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors="""pt""" )
            targets = tokenizer(text_target=tgt_text , return_tensors="""pt""" )
            input_ids = inputs["""input_ids"""]
            labels = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def test_global_attention_mask(self ):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["""Summary of the text.""", """Another summary."""]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output["""global_attention_mask"""] = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , expected_global_attention_mask )
    def test_pretokenized_inputs(self ):
        pass
    def test_embeded_special_tokens(self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
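# Illustrative sketch (added, not part of the test suite): how LED's global attention mask
# is typically built and padded alongside input_ids; the checkpoint matches the tests above.
# tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# enc = tokenizer(["Summary of the text."], padding=False)
# enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
# padded = tokenizer.pad(enc)  # pads global_attention_mask with -1 alongside input_ids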
| 180 | 0 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
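# Illustrative note (added): with the _LazyModule pattern above, the tokenizer classes are
# only imported when first accessed, e.g.:
# from transformers.models.nllb import NllbTokenizer  # triggers the lazy import on access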
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance ( vector_1 : Vector , vector_2 : Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )


def euclidean_distance_no_np ( vector_1 : Vector , vector_2 : Vector ) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark ( ) -> None:
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
benchmark()
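    # Illustrative sanity check (added): both implementations should agree on a small input.
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - euclidean_distance_no_np([1, 2, 3], [4, 5, 6])) < 1e-9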
| 692 |
import requests
def send_slack_message ( message_body : str , slack_url : str ) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
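# Illustrative equivalent (added): the request above matches this curl invocation, where the
# URL placeholder is whatever Slack generated for your incoming webhook:
# curl -X POST -H 'Content-Type: application/json' --data '{"text": "Hello"}' <SLACK CHANNEL URL>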
| 124 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 5_0 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
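# Illustrative usage sketch (added; "google/ddpm-cifar10-32" is an assumed example checkpoint):
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]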
| 213 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor ( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop("""sampling_rate""" , None )

        if text is None and audios is None:
            raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            encoding["""input_features"""] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
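# Illustrative usage sketch (added; the model id is an assumed example checkpoint):
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")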
| 213 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig ( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self ):
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )

            # If the file has one json object per line
            else:
                with open(file , "rb" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F'Batch of {len(batch )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                    raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                                raise ValueError(
                                    F'Not able to read records in the JSON file at {file}. '
                                    F'You should probably indicate the field of the JSON file containing your records. '
                                    F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
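# Illustrative usage sketch (added; "data.jsonl" is a hypothetical newline-delimited file):
# from datasets import load_dataset
# ds = load_dataset("json", data_files="data.jsonl")  # exercises the chunked reader above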
| 54 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """simple docstring"""

    def __init__(self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    """simple docstring"""

    _active_patches = []

    def __init__(self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split('.' )

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()['__builtins__']:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self )

    def stop( self ):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
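# Illustrative usage sketch (added): patching `os.path.join` inside a hypothetical module `mod`.
# with patch_submodule(mod, "os.path.join", lambda *parts: "/".join(parts)):
#     mod.build_path("a", "b")  # any call to os.path.join inside `mod` sees the patch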
| 239 | 0 |
"""simple docstring"""
def UpperCamelCase ( word : str ):
    return "".join(chr(ord(char ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
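    # Illustrative example (added): ASCII lowercase letters are shifted down by 32 code points.
    print(UpperCamelCase("hello world"))  # HELLO WORLD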
| 173 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[str] = "relu" , **lowerCamelCase_ : Union[str, Any] , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__a = tf.keras.layers.ConvaD(
filters=lowerCamelCase_ , kernel_size=lowerCamelCase_ , strides=lowerCamelCase_ , padding="""VALID""" , groups=lowerCamelCase_ , use_bias=lowerCamelCase_ , name="""convolution""" , )
__a = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
__a = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] ) -> Optional[Any]:
__a = self.convolution(self.padding(lowerCamelCase_ ) )
__a = self.normalization(lowerCamelCase_ )
__a = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : Tuple ) -> List[Any]:
super().__init__(**lowerCamelCase_ )
__a = config.num_channels
__a = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCAmelCase_ ( self : Dict , lowerCamelCase_ : Tuple ) -> List[str]:
__a = shape_list(lowerCamelCase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__a = tf.transpose(lowerCamelCase_ , perm=(0, 2, 3, 1) )
__a = self.embedder(lowerCamelCase_ )
return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : Optional[int] ) -> Any:
super().__init__(**lowerCamelCase_ )
__a = tf.keras.layers.ConvaD(
filters=lowerCamelCase_ , kernel_size=1 , strides=lowerCamelCase_ , use_bias=lowerCamelCase_ , name="""convolution""" )
__a = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(lowerCamelCase_ ) , training=lowerCamelCase_ )
class TFRegNetSELayer ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , **lowerCamelCase_ : List[Any] ) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
__a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name="""pooler""" )
__a = [
tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : Dict ) -> int:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__a = self.pooler(lowerCamelCase_ )
for layer_module in self.attention:
__a = layer_module(lowerCamelCase_ )
__a = hidden_state * pooled
return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[int] ) -> Optional[int]:
super().__init__(**lowerCamelCase_ )
__a = in_channels != out_channels or stride != 1
__a = max(1 , out_channels // config.groups_width )
__a = (
TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__a = [
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name="""layer.2""" ),
]
__a = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : Dict , lowerCamelCase_ : List[str] ) -> Tuple:
__a = hidden_state
for layer_module in self.layers:
__a = layer_module(lowerCamelCase_ )
__a = self.shortcut(lowerCamelCase_ )
hidden_state += residual
__a = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[Any] ) -> Dict:
super().__init__(**lowerCamelCase_ )
__a = in_channels != out_channels or stride != 1
__a = max(1 , out_channels // config.groups_width )
__a = (
TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__a = [
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name="""layer.3""" ),
]
__a = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ) -> Optional[Any]:
__a = hidden_state
for layer_module in self.layers:
__a = layer_module(lowerCamelCase_ )
__a = self.shortcut(lowerCamelCase_ )
hidden_state += residual
__a = self.activation(lowerCamelCase_ )
return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
def __init__( self : List[str] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : List[Any] ) -> Optional[int]:
super().__init__(**lowerCamelCase_ )
__a = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__a = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , name="""layers.0""" ),
*[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self : Dict , lowerCamelCase_ : List[str] ) -> int:
for layer_module in self.layers:
__a = layer_module(lowerCamelCase_ )
return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : Any ) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
__a = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__a = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase_ ( self : Tuple , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
__a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a = hidden_states + (hidden_state,)
__a = stage_module(lowerCamelCase_ )
if output_hidden_states:
__a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
    config_class = RegNetConfig
def __init__( self : List[str] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ) -> Tuple:
super().__init__(**lowerCamelCase_ )
__a = config
__a = TFRegNetEmbeddings(lowerCamelCase_ , name="""embedder""" )
__a = TFRegNetEncoder(lowerCamelCase_ , name="""encoder""" )
__a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name="""pooler""" )
@unpack_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.embedder(lowerCamelCase_ , training=lowerCamelCase_ )
__a = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ )
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase_ )
# Change to NCHW output format have uniformity in the modules
__a = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) )
__a = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__a = tuple([tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''

    @property
    def input_signature( self ):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.float32 )}
REGNET_START_DOCSTRING = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
def __init__( self : Tuple , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : List[Any] ) -> List[str]:
super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
__a = TFRegNetMainLayer(lowerCamelCase_ , name="""regnet""" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self : str , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Any=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.regnet(
pixel_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__( self : int , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Tuple ) -> Union[str, Any]:
super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
__a = config.num_labels
__a = TFRegNetMainLayer(lowerCamelCase_ , name="""regnet""" )
# classification head
__a = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Optional[int]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.regnet(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier[0](lowerCamelCase_ )
__a = self.classifier[1](lowerCamelCase_ )
__a = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase_ , logits=lowerCamelCase_ )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
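# Illustrative usage sketch (added; checkpoint and `image` are assumed examples, matching the
# docstrings above):
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# logits = model(**processor(images=image, return_tensors="tf")).logits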
| 173 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution ( t_limit : int = 1_0_0_0_0_0_0 , n_limit : int = 1_0 ):
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
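    # Illustrative check (added): a 3x3 square with a 1x1 hole uses 9 - 1 = 8 tiles, so
    # t = 8 is produced once by the loop above (outer_width=3, hole_width=1).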
| 47 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance ( lat1 : float , lon1 : float , lat2 : float , lon2 : float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_demonimator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
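# Illustrative check (added; the coordinates are an assumed example pair): San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) is roughly 254 km.
# lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)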
| 420 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def teacher_config(self ):
        return AutoConfig.from_pretrained(TINY_BART )

    def test_valid_t5(self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def test_asymmetric_t5(self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )

    def test_same_decoder_small_encoder(self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def test_small_enc_small_dec(self ):
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def test_raises_assert(self ):
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 719 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder ( CLIPPreTrainedModel ):
    '''simple docstring'''

    def __init__( self , config , proj_size=7_68 ):
        super().__init__(config )
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper ( nn.Module ):
    '''simple docstring'''

    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn="gelu" , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )

        return hidden_states
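# Illustrative shape note (added): for a CLIP vision config with hidden_size H and
# proj_size=768, a batch of pixel values maps to latent states of shape (batch, 1, 768)
# after the projection above, plus the learned (1, 1, 768) unconditional vector on request.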
| 282 | 0 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 251 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 251 | 1 |
'''simple docstring'''
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow C++ log noise before TF is imported
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 706 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor ( ProcessorMixin ):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )

        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , )-> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""pixel_values"""] = images

        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )

        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
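# Illustrative usage sketch (added; the model id and `image` are assumed examples):
# from transformers import LayoutLMv3Processor
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor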
| 517 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power ( voltage : float , current : float , power : float ):
    """simple docstring"""
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
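    # Illustrative example (added): 2 A drawn from a 230 V supply dissipates 460 W.
    print(electric_power(voltage=230, current=2, power=0))  # result(name='power', value=460.0)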
| 88 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, """src""", """diffusers""")
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    def test_find_backend(self ):
        simple_backend = find_backend(' if not is_torch_available():' )
        self.assertEqual(simple_backend , 'torch' )

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(double_backend , 'torch_and_transformers' )

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend , 'torch_and_transformers_and_onnx' )
    def test_read_init(self ):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('torch_and_transformers' , objects )
        self.assertIn('flax_and_transformers' , objects )
        self.assertIn('torch_and_transformers_and_onnx' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel' , objects['torch'] )
        self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
        self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
        self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
        self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
        self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
    def test_create_dummy_object(self ):
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )

        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files(self ):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
| 328 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply max pooling with a square window of `size` and the given `stride`."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply average pooling with a square window of `size` and the given `stride`."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the (truncated) average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0
        j = 0
        mat_j = 0

    return updated_arr
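# Illustrative check on a known 4x4 matrix (example values, not from the original file):
#     example = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#     maxpooling(example, size=2, stride=2)  # -> [[6., 8.], [14., 16.]]
#     avgpooling(example, size=2, stride=2)  # -> [[3., 5.], [11., 13.]]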
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")  # replace with the path to a real (square) image

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 620 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
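# --- illustrative usage (checkpoint id assumed, not part of the original file) ---
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]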
| 620 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=10_0000, hidden_size=4096, intermediate_size=1_1008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention)  # sic: legacy (misspelled) kwarg name
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
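# Illustrative: a config that passes the validation above would look like
# (example values, not from the original file):
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})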
| 583 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
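# Illustrative self-checks (example values, not in the original script):
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 9, 0, 2]) == [-1, 0, 2, 9]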
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 583 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.')
| 70 | def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
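# Illustrative check (example values): h_n = n * (2n - 1) for n = 0..4
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]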
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 70 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__a : Any = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__a : int = F'''https://www.google.com/search?q={query}&num=100'''
__a : Optional[Any] = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__a : Tuple = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__a : List[str] = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
    webbrowser.open(link) | 534 | def abbr(a: str, b: str) -> bool:
    """HackerRank 'Abbreviation': can `a` be turned into `b` by capitalizing some
    of its lowercase letters and deleting the remaining lowercase letters?"""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
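# Illustrative checks (HackerRank's sample case): "daBcd" can become "ABC",
# while "dBcd" cannot.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False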
if __name__ == "__main__":
import doctest
doctest.testmod() | 534 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ log spam (reconstructed target; the original left-hand side was lost)
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 28 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28 | 1 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
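# Sanity check (example value): the smallest number evenly divisible by 1..10 is 2520.
assert solution(10) == 2520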
if __name__ == "__main__":
print(F"{solution() = }")
| 82 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 50 | 0 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Primality check based on the fact that all primes > 3 are of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
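# Sanity checks (example values): the 1st prime is 2, the 6th is 13.
assert solution(1) == 2
assert solution(6) == 13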
if __name__ == "__main__":
print(F"""{solution() = }""")
| 230 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 230 | 1 |
def solution(limit: int = 28_123) -> int:
    """Sum of all positive integers <= limit that cannot be written as the
    sum of two abundant numbers (Project Euler 23)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
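# Known Project Euler 23 answer for the default limit (left as a comment,
# since evaluating it takes a while):
#     solution() == 4179871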
if __name__ == "__main__":
print(solution())
| 487 |
def find_min(arr: list[int]) -> int:
    """Partition `arr` into two subsets whose sums are as close as possible and
    return the minimum absolute difference of the two subset sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # carry over the result without arr[i - 1] (fixes the dp[i][j - 1] typo)

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
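# Illustrative check (example values): [1, 6, 11, 5] splits into 12 vs. 11.
assert find_min([1, 6, 11, 5]) == 1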
| 487 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions; the local names become the query parameters."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Five-day forecast."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call endpoint for a latitude/longitude pair."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break | 182 | 0 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Minimum count of perfect squares summing to `number` (Lagrange DP)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
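# Illustrative checks (example values): 12 = 4 + 4 + 4 and 13 = 4 + 9.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2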
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 233 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode('utf-8')
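# Illustrative round-trip check (sample text assumed):
assert base85_decode(base85_encode("some text")) == "some text"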
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs, ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )

    def read(self) -> Dataset:
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                    written += num_rows

        return written
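# --- illustrative usage (assumed table/database names; not in the original module) ---
#     import sqlite3
#     con = sqlite3.connect("my.db")
#     ds = SqlDatasetReader("SELECT * FROM my_table", con).read()
#     SqlDatasetWriter(ds, "my_table_copy", con).write()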
| 578 | 0 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')  # custom error defined above
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self) -> str:
        return '\n'.join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 415 | import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.')
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase__ = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase__ = (
Path(SCREAMING_SNAKE_CASE_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase__ = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(SCREAMING_SNAKE_CASE_ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE_ , extra_context=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = configuration['''lowercase_modelname''']
UpperCamelCase__ = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F"{directory}/configuration.json" )
UpperCamelCase__ = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase__ = F"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
os.makedirs(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=SCREAMING_SNAKE_CASE_ )
# Tests require submodules as they have parent imports
with open(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
F"{directory}/__init__.py" , F"{model_dir}/__init__.py" , )
shutil.move(
F"{directory}/configuration_{lowercase_model_name}.py" , F"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
UpperCamelCase__ = f.readlines()
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(SCREAMING_SNAKE_CASE_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_{lowercase_model_name}.py" , F"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_tf_{lowercase_model_name}.py" , F"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_tf_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_flax_{lowercase_model_name}.py" , F"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_flax_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/{lowercase_model_name}.md" , F"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
F"{directory}/tokenization_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/tokenization_fast_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Create temp file
UpperCamelCase__ , UpperCamelCase__ = mkstemp()
UpperCamelCase__ = False
with fdopen(SCREAMING_SNAKE_CASE_ , '''w''' ) as new_file:
with open(SCREAMING_SNAKE_CASE_ ) as old_file:
for line in old_file:
new_file.write(SCREAMING_SNAKE_CASE_ )
if line_to_copy_below in line:
UpperCamelCase__ = True
for line_to_copy in lines_to_copy:
new_file.write(SCREAMING_SNAKE_CASE_ )
if not line_found:
raise ValueError(F"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Remove original file
remove(SCREAMING_SNAKE_CASE_ )
# Move new file
move(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def skip_units(SCREAMING_SNAKE_CASE_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ ) as datafile:
UpperCamelCase__ = []
UpperCamelCase__ = False
UpperCamelCase__ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase__ = line.split('''"''' )[1]
UpperCamelCase__ = skip_units(SCREAMING_SNAKE_CASE_ )
elif "# Below: " in line and "##" not in line:
UpperCamelCase__ = line.split('''"''' )[1]
UpperCamelCase__ = skip_units(SCREAMING_SNAKE_CASE_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase__ = []
elif "##" not in line:
lines_to_copy.append(SCREAMING_SNAKE_CASE_ )
remove(SCREAMING_SNAKE_CASE_ )
replace_in_files(F"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(SCREAMING_SNAKE_CASE_ )
| 415 | 1 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return `number`'s multiplication table up to `number_of_terms` terms."""
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 351 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 351 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to('cuda')

        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type='pt').frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
        pipe = pipe.to('cuda')

        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu').manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type='pt').frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 27 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Make sure the backend pre-tokenizer matches the requested `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
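

# Minimal usage sketch (assumes the public "roberta-base" checkpoint is reachable):
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   encoding = tokenizer("Hello world")
#   encoding["input_ids"] starts with bos_token_id (0) and ends with eos_token_id (2).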
| 276 | 0 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowercase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 296 | """simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the fixture features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
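

# These two helpers mirror the writer's `lines=` switch exercised below: JSON Lines output
# is parsed one object per line, while `lines=False` output is parsed as a single JSON document.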
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 296 | 1 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    # Write the message column by column: column `col` collects every `key`-th character.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 97 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 97 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
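

# Note: `tokenize` relies on the module-level `tokenizer` defined below. Each mapped example
# gains "input_ids" plus a characters-per-token ratio, which is commonly used downstream to
# filter out poorly tokenized (e.g. non-natural-language or binary) files.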
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 721 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 191 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
A = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(f.name, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 77 |
def solution(limit=28123):
    """
    Sum of all positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
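

# Every integer above 28123 is known to be expressible as a sum of two abundant numbers,
# so the default limit suffices; solution() evaluates to 4179871.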
if __name__ == "__main__":
print(solution())
| 648 | 0 |
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
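

# solution(10) gives the last ten digits of this (known non-Mersenne) prime: "8739992577".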
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 705 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
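

# e.g. rabin_karp("abc", "zabcz") -> True. Average-case cost is O(len(text) + len(pattern));
# the explicit slice comparison guards against hash collisions.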
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 483 | 0 |
'''simple docstring'''
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
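

# e.g. dna("ATCG") -> "TAGC" and dna("GTA") -> "CAT"; a strand containing characters
# outside {A, T, C, G} raises ValueError.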
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 433 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
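

# Minimal shape-check sketch (illustrative sizes; assumes jax and flax are installed,
# and that FlaxResnetBlock2D projects the time embedding internally):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   sample = jnp.zeros((1, 8, 8, 32))   # NHWC layout used by these Flax blocks
#   temb = jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden_states, output_states = block.apply(params, sample, temb)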
| 292 | 0 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of `n` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
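

# e.g. solution(13195) == 29; the default input 600851475143 evaluates to 6857.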
if __name__ == "__main__":
print(F"""{solution() = }""")
| 721 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 362 | 0 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 240 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 240 | 1 |
class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
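# Example usage (a hypothetical 3x4 grid with two 8-connected islands):
#   grid = [[1, 1, 0, 0],
#           [0, 1, 0, 0],
#           [0, 0, 0, 1]]
#   g = matrix(3, 4, grid)
#   assert g.count_islands() == 2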
| 705 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits followed by the matching check letter."""
    if not isinstance(spanish_id, str):
        msg = F"""Expected string as input, found {type(spanish_id).__name__}"""
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
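# Hedged example: "12345678Z" is the commonly cited valid test DNI, since
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
#   assert is_spain_national_id("12345678Z")
#   assert is_spain_national_id("12345678-z")  # separators and case are normalized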
if __name__ == "__main__":
import doctest
doctest.testmod()
| 167 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = 'switch_transformers'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'''`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}''')
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
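# Hedged example: with the defaults above (12 layers per stack, 3 sparse layers),
# every 4th layer is sparse on both the encoder and the decoder.
#   config = SwitchTransformersConfig()
#   assert config.encoder_sparse_step == 4 and config.decoder_sparse_step == 4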
| 246 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator returning the wall-clock time a call took instead of its result."""
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
    num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''')
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 607 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """Dynamic-programming tables: minimal multiplication costs and optimal splits."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 718 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 281 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort `collection` in place (top-down insertion sort)."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int) -> None:
    """Bubble collection[index] rightward until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 493 | 0 |
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
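# Worked example: for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border has length 4.
#   assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   assert longest_prefix("aabcdaabc") == 4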
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 703 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 298 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for the player to move at `depth` of a perfect game tree."""
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if len(scores) == 0:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('''Optimal value : ''', end='''''')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 43 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate an untrained model from `config_name` and save it to `save_dir`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 272 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """LayoutLMv2ImageProcessor"""
    tokenizer_class = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes """
                """if you initialized the image processor with apply_ocr set to True.""")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["""boxes"""], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("""pixel_values""")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["""overflow_to_sample_mapping"""])
        encoded_inputs["""image"""] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
| 269 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 269 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = '''canine'''
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE_000,
        eos_token_id=0xE_001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 11 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 250 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) substring rewrite, in order, to the key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
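# Hedged illustration with a hypothetical TF key (not taken from a real checkpoint):
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
# applies the substitutions in order and yields
# "model.decoder.layers.0.self_attn.q_proj.weight".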
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 294 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 294 | 1 |
import base64
def base64_encode(string: str) -> bytes:
    '''Encode the UTF-8 bytes of `string` with standard Base64.'''
    return base64.b64encode(string.encode("""utf-8"""))
def base64_decode(encoded_bytes: bytes) -> str:
    '''Decode Base64 bytes back to a UTF-8 string.'''
    return base64.b64decode(encoded_bytes).decode("""utf-8""")
if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 36 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Bundles a SpeechT5 feature extractor and tokenizer into a single processor."""
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily let the feature extractor pad mel spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 645 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the product used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main():
    n = int(input('''enter the numbers of values: '''))
    y = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('''enter the values of parameters in a list: ''')
    x = list(map(int, input().split()))
    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: '''))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
| 700 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 95 | 0 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
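# Worked example:
#   bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}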
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""")
    ordered_rotations = [""""""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result["bwt_string"]}'"""
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        f"""we get original string '{original_string}'"""
    )
| 63 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Assign each RGB pixel in `x` to the index of its nearest cluster centre."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
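# Illustrative self-check of the quantizer (hypothetical two-colour palette,
# not part of the processor itself); guarded so importing stays side-effect free.
if __name__ == "__main__":
    _pixels = np.array([[0, 0, 0], [250, 250, 250]])
    _palette = np.array([[0, 0, 0], [255, 255, 255]])
    assert color_quantize(_pixels, _palette).tolist() == [0, 1]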
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # rescales pixel values from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
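# End-to-end usage sketch (assumes a `palette` of shape (n_clusters, 3), e.g.
# the 512-colour palette shipped with pretrained ImageGPT checkpoints):
#   processor = ImageGPTImageProcessor(clusters=palette)
#   encoding = processor(images=pil_image, return_tensors="pt")
#   encoding["input_ids"].shape  # -> (batch_size, height * width)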
| 631 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value entries
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
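# Example of the split performed above (illustrative, for the base model with
# hidden_size = 768): a fused "blocks.0.attn.qkv.weight" of shape (2304, 768)
# becomes three (768, 768) matrices stored under
# "vit.encoder.layer.0.attention.attention.{query,key,value}.weight".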
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
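# Typical invocation (illustrative script name, matching the defaults below):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base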
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 710 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
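# Shape sketch (illustrative): with hidden size 256, each fused `in_proj_weight`
# is (768, 256); rows 0:256 feed q_proj, rows 256:512 feed k_proj and the last
# 256 rows feed v_proj, mirroring the slicing above.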
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # mirrors the key surgery of the analogous DETR conversion script
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
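# Typical invocation (illustrative script name, matching the defaults above):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet-50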
| 474 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of pseudo-random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
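# Example (illustrative): floats_list((2, 3)) returns a 2x3 nested list of
# pseudo-random floats in [0, scale), drawn from the module-level `global_rng`.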
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 125 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
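# For orientation, the hub cache layout exercised below looks like (illustrative):
#   models--hf-internal-testing--tiny-random-bert/
#       blobs/                          # content-addressed file payloads
#       refs/main                       # text file holding the tip commit hash
#       snapshots/<commit>/config.json  # per-revision views into blobs/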
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 125 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
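# Usage sketch (illustrative): the tester above is driven from a TestCase, e.g.
#   tester = GPTNeoXModelTester(self)
#   config, input_ids, input_mask, token_labels = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, input_ids, input_mask)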
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase : Dict = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase ( self : int , __a : int ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Tuple = ids_tensor([1, 10] , config.vocab_size )
__lowercase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : List[Any] = GPTNeoXModel(__a )
original_model.to(__a )
original_model.eval()
__lowercase : Union[str, Any] = original_model(__a ).last_hidden_state
__lowercase : Tuple = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
__lowercase : str = GPTNeoXModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__lowercase : List[Any] = scaled_model(__a ).last_hidden_state
__lowercase : List[str] = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : int = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
__lowercase : List[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__a )
__lowercase : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase : Any = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
__lowercase : Tuple = model.generate(**__a , do_sample=__a , max_new_tokens=20 )
__lowercase : List[Any] = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
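

# A typical way to run just the GPT-NeoX tests above (path assumed to follow the
# usual transformers repository layout) is:
#
#   python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -v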
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT checkpoint's weights to our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 649 | 0 |
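
    # Example invocation (a sketch; the script file name is hypothetical, and the
    # URL shown is simply this script's default checkpoint):
    #
    #   python convert_dit_unilm_to_pytorch.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
    #       --pytorch_dump_folder_path ./dit-base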
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process (shortest-job order)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed: a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is then executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
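
    # Worked expectation for the test case above (a sketch, assuming the
    # shortest-job execution order P1(2) -> P3(3) -> P2(5) -> P4(7), all arrivals at 0):
    # waiting times are [0, 5, 2, 10] and turnaround times are [2, 10, 5, 17],
    # so the printed averages should be 4.25000 and 8.50000.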
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job: dict) -> dict:
    """Extract start/end timestamps and the duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None) -> dict:
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
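
    # Example (a sketch; the file name is hypothetical and the run id must be a
    # real workflow run on the repository):
    #
    #   python get_github_job_time.py --workflow_run_id 1234567890
    #
    # This prints one "job name: duration-in-minutes" line per job, longest first.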
"""Minimum spanning tree via Boruvka's algorithm, on an adjacency-list graph."""


class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct edge weights; bump any duplicates."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given list of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns the MST as a new Graph."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
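

if __name__ == "__main__":
    # A minimal usage sketch (edge weights are made distinct first, since
    # Boruvka's algorithm assumes distinct weights):
    g = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3), ("c", "d", 1)])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)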
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
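

# A minimal usage sketch: this module is the packaged "csv" builder, so end users
# normally reach it through `load_dataset`; extra keyword arguments become
# CsvConfig fields and are forwarded to `pandas.read_csv`:
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)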
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
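

# The last two tests above download "facebook/blenderbot-90M" from the Hub; a
# typical local run (path assumed to follow the usual transformers repo layout) is:
#
#   python -m pytest tests/models/blenderbot_small/test_tokenization_blenderbot_small.py -v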
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Flip two qubits with X (NOT) gates and measure them on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a quantum circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) gate to qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"""Total count for various states are: {counts}""")
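
    # On the ideal (noise-free) aer_simulator, both qubits are flipped to |1>,
    # so every shot measures "11" and the printed counts should be {'11': 1000}.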
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield all .py/.ipynb files under top_dir, skipping hidden dirs, scripts/ and __init__.py."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
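
    # For a tree like ./sorts/bubble_sort.py this prints markdown along the
    # lines of (a sketch):
    #
    #   ## Sorts
    #     * [Bubble Sort](sorts/bubble_sort.py)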
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
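

# A minimal usage sketch: this is the packaged "pandas" builder, which reads
# pickled DataFrames, so it is normally reached through `load_dataset`:
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="data.pkl")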
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
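

# The `_LazyModule` indirection above means submodules are only imported when an
# attribute is first accessed; e.g. the following only loads the text modeling
# code (and torch) on demand:
#
#   from transformers.models.data2vec import Data2VecTextModel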
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using either a
    multiprocessing.Pool or a joblib backend for parallelization."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bars are not yet supported here, because tqdm cannot accurately be
    # applied to joblib without monkey-patching joblib internals
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configures the parallel backend to use while this context manager is active."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
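

# A minimal usage sketch of the context manager above (assuming `joblibspark`
# is installed and a Spark session is active):
#
#   from datasets.parallel import parallel_backend
#
#   with parallel_backend("spark"):
#       ds = ds.map(my_function, num_proc=2)  # num_proc becomes joblib's n_jobs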
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__magic_name__ =logging.get_logger(__name__)
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Any =["pixel_values"]
def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = size if size is not None else {'''shortest_edge''': 256}
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ ) -> np.ndarray:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase__ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
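

# A minimal usage sketch, assuming the class above corresponds to transformers'
# MobileViT image processor with its semantic-segmentation post-processing:
#
#   from PIL import Image
#   import requests
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
#   image = Image.open(requests.get(url, stream=True).raw)  # `url` is a placeholder
#   inputs = processor(images=image, return_tensors="pt")
#   # ... run a segmentation model on `inputs`, then:
#   # maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])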
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__magic_name__ =logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __UpperCamelCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=768 ) -> Dict:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = proj_size
UpperCamelCase__ = CLIPVisionModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = PaintByExampleMapper(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = nn.LayerNorm(config.hidden_size )
UpperCamelCase__ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
UpperCamelCase__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.model(pixel_values=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = clip_output.pooler_output
UpperCamelCase__ = self.mapper(latent_states[:, None] )
UpperCamelCase__ = self.final_layer_norm(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.proj_out(SCREAMING_SNAKE_CASE_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _A ( nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
super().__init__()
UpperCamelCase__ = (config.num_hidden_layers + 1) // 5
UpperCamelCase__ = config.hidden_size
UpperCamelCase__ = 1
UpperCamelCase__ = nn.ModuleList(
[
BasicTransformerBlock(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , activation_fn='''gelu''' , attention_bias=SCREAMING_SNAKE_CASE_ )
for _ in range(SCREAMING_SNAKE_CASE_ )
] )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
for block in self.blocks:
UpperCamelCase__ = block(SCREAMING_SNAKE_CASE_ )
return hidden_states
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from
    HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Prepare one or several HTML strings for the model, extracting nodes and xpaths."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
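

# A minimal usage sketch of the feature extractor above:
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   print(encoding.nodes)   # [['Hello world']]
#   print(encoding.xpaths)  # [['/html/body/p']]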
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __A ( self: Tuple ) -> Dict:
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , '''width_multiplier''' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Dict , __A: Optional[Any] , __A: Optional[int]=13 , __A: Union[str, Any]=64 , __A: Dict=2 , __A: str=3 , __A: Dict="swish" , __A: List[str]=3 , __A: Union[str, Any]=32 , __A: str=0.1 , __A: int=0.02 , __A: Optional[Any]=True , __A: str=True , __A: List[Any]=10 , __A: Dict=None , __A: Optional[Any]=0.25 , __A: Optional[int]=0.0 , __A: Tuple=0.0 , ) -> Optional[int]:
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = make_divisible(5_12 * width_multiplier , divisor=8 )
_A = hidden_act
_A = conv_kernel_size
_A = output_stride
_A = classifier_dropout_prob
_A = use_labels
_A = is_training
_A = num_labels
_A = initializer_range
_A = scope
_A = width_multiplier
_A = ffn_dropout
_A = attn_dropout
def __A ( self: Dict ) -> List[str]:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self: Tuple ) -> Optional[Any]:
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def __A ( self: Dict , __A: Union[str, Any] , __A: int , __A: Dict , __A: List[str] ) -> str:
_A = MobileViTVaModel(config=__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: str , __A: int , __A: Optional[Any] , __A: int , __A: Tuple ) -> Any:
_A = self.num_labels
_A = MobileViTVaForImageClassification(__A )
model.to(__A )
model.eval()
_A = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self: List[Any] , __A: Optional[Any] , __A: Tuple , __A: int , __A: List[Any] ) -> Optional[Any]:
_A = self.num_labels
_A = MobileViTVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_A = model(__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self: Dict ) -> List[Any]:
_A = self.prepare_config_and_inputs()
_A ,_A ,_A ,_A = config_and_inputs
_A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: str ) -> Optional[Any]:
_A = MobileViTVaModelTester(self )
_A = MobileViTVaConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: Any ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __A ( self: Any ) -> List[str]:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __A ( self: int ) -> Any:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __A ( self: Optional[Any] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __A ( self: Any ) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Optional[int] ) -> List[str]:
pass
def __A ( self: List[Any] ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def __A ( self: List[str] ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __A ( self: str ) -> int:
def check_hidden_states_output(__A: List[str] , __A: str , __A: Optional[int] ):
_A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(__A , __A ) )
_A = outputs.hidden_states
_A = 5
self.assertEqual(len(__A ) , __A )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_A = 2
for i in range(len(__A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(__A , __A , __A )
def __A ( self: str ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __A ( self: int ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __A ( self: Dict ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileViTVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: int ) -> Optional[Any]:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
_A = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: List[str] ) -> Tuple:
_A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __A )
_A = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 ) )
@slow
def __A ( self: List[Any] ) -> Optional[int]:
_A = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = model.to(__A )
_A = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_A = prepare_img()
_A = image_processor(images=__A , return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
_A = model(**__A )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)] )
_A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __A )
_A = image_processor.post_process_semantic_segmentation(outputs=__A )
_A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __A )
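
# Sketch of the `make_divisible` helper imported above (assumed MobileNet-style
# semantics: round a channel count to a multiple of `divisor` without dropping
# more than 10% below the requested value; not necessarily the exact HF code):
def make_divisible_sketch(value: float, divisor: int = 8) -> int:
    new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


# With the tester defaults above (width_multiplier=0.25), 512 * 0.25 stays 128.
assert make_divisible_sketch(5_12 * 0.25) == 128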
| 484 | 0 |
'''simple docstring'''
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
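    # Added sanity checks (illustrative): empty input and duplicate elements.
    assert strand_sort([]) == []
    assert strand_sort([2, 2, 1, 1]) == [1, 1, 2, 2]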
| 435 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
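# Typical (hypothetical) usage of the helpers re-exported above, e.g. for MRPC;
# `data_dir` and `tokenizer` are assumed to exist:
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_train_examples(data_dir)
#   features = glue_convert_examples_to_features(
#       examples, tokenizer, max_length=128, task="mrpc"
#   )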
| 435 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any=13 , lowerCAmelCase : Any=32 , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : Dict=4 , lowerCAmelCase : str=[10, 20, 30, 40] , lowerCAmelCase : List[Any]=[2, 2, 3, 2] , lowerCAmelCase : Tuple=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Union[str, Any]=37 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : Optional[Any]=["stage2", "stage3", "stage4"] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Any=None , ) -> int:
"""simple docstring"""
_snake_case : str = parent
_snake_case : Any = batch_size
_snake_case : Union[str, Any] = image_size
_snake_case : int = num_channels
_snake_case : List[str] = num_stages
_snake_case : List[str] = hidden_sizes
_snake_case : int = depths
_snake_case : List[str] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : int = type_sequence_label_size
_snake_case : Any = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Union[str, Any] = num_labels
_snake_case : int = scope
_snake_case : List[Any] = num_stages
def UpperCamelCase_ ( self : List[str]) -> List[str]:
"""simple docstring"""
_snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_snake_case : str = None
if self.use_labels:
_snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Any) -> List[str]:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCamelCase_ ( self : Any) -> List[str]:
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : int , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : Optional[Any]) -> int:
"""simple docstring"""
_snake_case : Optional[int] = UperNetForSemanticSegmentation(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
_snake_case : Tuple = model(lowerCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCamelCase_ ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_snake_case : str = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case_ : List[Any] = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case_ : Any = False
snake_case_ : int = False
snake_case_ : str = False
snake_case_ : List[Any] = False
snake_case_ : Tuple = False
snake_case_ : Union[str, Any] = False
def UpperCamelCase_ ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[str] = UperNetModelTester(self)
_snake_case : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37)
def UpperCamelCase_ ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : str) -> Union[str, Any]:
"""simple docstring"""
return
def UpperCamelCase_ ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(lowerCAmelCase)
_snake_case : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : int = [*signature.parameters.keys()]
_snake_case : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
def UpperCamelCase_ ( self : int) -> str:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase)
@unittest.skip(reason="""UperNet does not use inputs_embeds""")
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""")
def UpperCamelCase_ ( self : List[str]) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""")
def UpperCamelCase_ ( self : Tuple) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""")
def UpperCamelCase_ ( self : str) -> str:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
def UpperCamelCase_ ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase_ ( self : List[Any]) -> Dict:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Optional[Any]) -> str:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]):
_snake_case : str = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase))
_snake_case : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : str = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Dict = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCamelCase_ ( self : Union[str, Any]) -> int:
"""simple docstring"""
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[str] = _config_zero_init(lowerCAmelCase)
_snake_case : int = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
_snake_case : str = model_class(config=lowerCAmelCase)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="""UperNet does not have tied weights""")
def UpperCamelCase_ ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
@slow
def UpperCamelCase_ ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Tuple = UperNetForSemanticSegmentation.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def lowercase ( ) -> Union[str, Any]:
_snake_case : str = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case : int = Image.open(SCREAMING_SNAKE_CASE__ ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""")
_snake_case : List[str] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""").to(lowerCAmelCase)
_snake_case : Optional[int] = prepare_img()
_snake_case : Dict = processor(images=lowerCAmelCase , return_tensors="""pt""").to(lowerCAmelCase)
with torch.no_grad():
_snake_case : List[str] = model(**lowerCAmelCase)
_snake_case : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
_snake_case : Any = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase , atol=1E-4))
def UpperCamelCase_ ( self : int) -> List[str]:
"""simple docstring"""
_snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""")
_snake_case : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""").to(lowerCAmelCase)
_snake_case : Tuple = prepare_img()
_snake_case : Optional[Any] = processor(images=lowerCAmelCase , return_tensors="""pt""").to(lowerCAmelCase)
with torch.no_grad():
_snake_case : int = model(**lowerCAmelCase)
_snake_case : str = torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
_snake_case : Optional[int] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase , atol=1E-4))
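
# Illustrative helper (assumed ConvNeXt downsampling schedule, which the
# hidden-state shape check above relies on): stage i of the backbone runs at
# 1/4, 1/8, 1/16, 1/32 of the input resolution.
def expected_stage_resolution(image_size: int, stage: int) -> int:
    return image_size // (4 * 2**stage)


assert expected_stage_resolution(32, 0) == 8  # matches image_size // 4 above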
| 477 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case ( enum.Enum ):
'''simple docstring'''
snake_case_ : Any = 0
snake_case_ : Tuple = 1
snake_case_ : int = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case : int = None
if self.model.config.prefix is not None:
_snake_case : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case : List[str] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case : Optional[int] = self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params)
_snake_case : Dict = {**self._preprocess_params, **preprocess_params}
_snake_case : Optional[int] = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : str , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[Any] = {}
if prefix is not None:
_snake_case : Tuple = prefix
if prefix:
_snake_case : Optional[int] = self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Any = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F'''{handle_long_generation} is not a valid value for the `handle_long_generation` parameter; expected'''
                    """ one of [None, 'hole']""")
_snake_case : Tuple = handle_long_generation
preprocess_params.update(lowerCAmelCase)
_snake_case : str = generate_kwargs
_snake_case : Optional[int] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
_snake_case : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
_snake_case : Tuple = ReturnType.TENSORS
if return_type is not None:
_snake_case : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
_snake_case : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case : str = self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
if len(lowerCAmelCase) > 1:
                warnings.warn(
                    """Stopping on a multi-token sequence is not yet supported in transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""")
_snake_case : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase)
def __call__( self : Tuple , lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
return super().__call__(lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : str="" , lowerCAmelCase : Any=None , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
_snake_case : Dict = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Tuple = prompt_text
if handle_long_generation == "hole":
_snake_case : int = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case : Optional[int] = generate_kwargs["""max_new_tokens"""]
else:
_snake_case : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        """We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"""
                        """ model's max length""")
_snake_case : List[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : int , lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = model_inputs["""input_ids"""]
_snake_case : List[Any] = model_inputs.get("""attention_mask""" , lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Any = 1
else:
_snake_case : List[Any] = input_ids.shape[0]
_snake_case : Tuple = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case : Any = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
_snake_case : Dict = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case : Optional[int] = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case : str = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case : Optional[int] = self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case : List[Any] = generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
_snake_case : Dict = tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=ReturnType.FULL_TEXT , lowerCAmelCase : Union[str, Any]=True) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = model_outputs["""generated_sequence"""][0]
_snake_case : List[str] = model_outputs["""input_ids"""]
_snake_case : Optional[Any] = model_outputs["""prompt_text"""]
_snake_case : str = generated_sequence.numpy().tolist()
_snake_case : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case : Union[str, Any] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case : int = self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case : str = 0
else:
_snake_case : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
_snake_case : Any = prompt_text + text[prompt_length:]
else:
_snake_case : Union[str, Any] = text[prompt_length:]
_snake_case : List[str] = {"""generated_text""": all_text}
records.append(lowerCAmelCase)
return records
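# Minimal usage sketch of this pipeline (the model name is illustrative):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)
#   # -> [{"generated_text": "Hello, I'm a language model, ..."}]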
| 477 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
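
# Conceptual sketch of the lazy-module pattern used above (not the actual
# `_LazyModule` implementation): submodules are imported only on first
# attribute access, keeping `import` of the package itself cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")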
| 720 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F"{solution() = }")
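    # Added checks (well-known Project Euler values).
    assert solution(10) == 2520
    assert solution(20) == 232792560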
| 439 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class a__ ( _snake_case ):
__magic_name__ : int = "audio-spectrogram-transformer"
def __init__(self : List[str], __UpperCAmelCase : List[str]=768, __UpperCAmelCase : Tuple=12, __UpperCAmelCase : Tuple=12, __UpperCAmelCase : str=3072, __UpperCAmelCase : int="gelu", __UpperCAmelCase : Optional[Any]=0.0, __UpperCAmelCase : Any=0.0, __UpperCAmelCase : int=0.02, __UpperCAmelCase : Tuple=1e-12, __UpperCAmelCase : Optional[int]=16, __UpperCAmelCase : Optional[Any]=True, __UpperCAmelCase : Dict=10, __UpperCAmelCase : Optional[Any]=10, __UpperCAmelCase : Union[str, Any]=1024, __UpperCAmelCase : List[Any]=128, **__UpperCAmelCase : Optional[int], ) -> List[Any]:
"""simple docstring"""
super().__init__(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = qkv_bias
SCREAMING_SNAKE_CASE : List[str] = frequency_stride
SCREAMING_SNAKE_CASE : Optional[Any] = time_stride
SCREAMING_SNAKE_CASE : int = max_length
SCREAMING_SNAKE_CASE : int = num_mel_bins
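
# Illustrative patch arithmetic (assumed from the AST paper, 16x16 patches):
# the number of patches per axis is (dim - 16) // stride + 1.
def num_patches_sketch(num_mel_bins: int = 128, max_length: int = 1024,
                       frequency_stride: int = 10, time_stride: int = 10,
                       patch: int = 16) -> int:
    frequency_patches = (num_mel_bins - patch) // frequency_stride + 1
    time_patches = (max_length - patch) // time_stride + 1
    return frequency_patches * time_patches


assert num_patches_sketch() == 12 * 101  # 1212 patches for the defaults above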
| 507 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _snake_case ( datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_lowerCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowerCamelCase )
class _snake_case ( datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_lowerCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_lowerCamelCase )
def __lowerCamelCase ( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def __lowerCamelCase ( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class _snake_case ( _snake_case ):
@require_beam
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a :Optional[Any] = DummyBeamDataset(cache_dir=_lowerCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
a :str = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowerCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE__ ( self ):
import apache_beam as beam
a :Any = beam.io.parquetio.WriteToParquet
a :Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a :Union[str, Any] = DummyBeamDataset(cache_dir=_lowerCamelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
a :str = partial(_lowerCamelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        _lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
a :List[str] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowerCamelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE__ ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a :Dict = DummyBeamDataset(cache_dir=_lowerCamelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a :List[Any] = NestedBeamDataset(cache_dir=_lowerCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
a :Any = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _lowerCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowerCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_lowerCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
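# Minimal Apache Beam sketch mirroring the builders above (DirectRunner;
# illustrative only):
#   import apache_beam as beam
#   with beam.Pipeline("DirectRunner") as pipeline:
#       _ = pipeline | "Load Examples" >> beam.Create(get_test_dummy_examples())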
| 445 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def __a ( __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None ) -> Any:
'''simple docstring'''
if "." in tensor_name:
lowercase_ = tensor_name.split("." )
for split in splits[:-1]:
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
lowercase_ = new_module
lowercase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
lowercase_ = tensor_name in module._buffers
lowercase_ = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
lowercase_ = False
lowercase_ = False
if is_buffer or not is_bitsandbytes_available():
lowercase_ = False
lowercase_ = False
else:
lowercase_ = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
lowercase_ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
lowercase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to("cpu" )
if value.dtype == torch.inta:
lowercase_ = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCamelCase ) and fpaa_statistics is None:
lowercase_ = new_value.T
lowercase_ = old_value.__dict__
if is_abit:
lowercase_ = bnb.nn.IntaParams(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
elif is_abit:
lowercase_ = bnb.nn.Paramsabit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
lowercase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(__lowerCamelCase ) )
else:
if value is None:
lowercase_ = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
lowercase_ = value.to(__lowerCamelCase )
else:
lowercase_ = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
lowercase_ = new_value
else:
lowercase_ = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
lowercase_ = new_value
def __a ( __lowerCamelCase : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=False ) -> Tuple:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
lowercase_ = []
current_key_name.append(__lowerCamelCase )
if (isinstance(__lowerCamelCase , nn.Linear ) or isinstance(__lowerCamelCase , __lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(__lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ , lowercase_ = module.weight.shape
else:
lowercase_ = module.in_features
lowercase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase_ = bnb.nn.LinearabitLt(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowercase_ = bnb.nn.Linearabit(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase_ = True
# Store the module class in case we need to transpose the weight later
lowercase_ = type(__lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCamelCase )
if len(list(module.children() ) ) > 0:
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_been_replaced=__lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __a ( __lowerCamelCase : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=None ) -> Tuple:
'''simple docstring'''
lowercase_ = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def __a ( *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , __lowerCamelCase , )
return replace_with_bnb_linear(*__lowerCamelCase , **__lowerCamelCase )
def __a ( *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> int:
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , __lowerCamelCase , )
return set_module_quantized_tensor_to_device(*__lowerCamelCase , **__lowerCamelCase )
def __a ( __lowerCamelCase : Dict ) -> str:
'''simple docstring'''
lowercase_ = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowercase_ = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(__lowerCamelCase , [] )
lowercase_ = len(__lowerCamelCase ) > 0
# Check if it is a base model
lowercase_ = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(__lowerCamelCase ) - set(__lowerCamelCase )
lowercase_ = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowercase_ = [".weight", ".bias"]
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(__lowerCamelCase , "" )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
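
# Conceptual sketch of absmax int8 quantization (the idea behind the int8
# linear layers configured above; not bitsandbytes' actual kernels):
def quantize_absmax_sketch(weight):
    import torch

    scale = weight.abs().max() / 127.0
    quantized = torch.clamp((weight / scale).round(), -127, 127).to(torch.int8)
    return quantized, scale


def dequantize_absmax_sketch(quantized, scale):
    return quantized.float() * scale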
| 461 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase_ : Optional[Any] = "bert-base-cased"
lowerCAmelCase_ : Any = "fp16"
lowerCAmelCase_ : Union[str, Any] = "bf16"
lowerCAmelCase_ : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowercase ( __lowerCamelCase ):
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
super().setUp()
lowercase_ = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__lowerCAmelCase):
lowercase_ = self.dist_env.copy()
lowercase_ = F'{i + 1}'
lowercase_ = strategy
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1))
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__lowerCAmelCase):
lowercase_ = self.dist_env.copy()
lowercase_ = prefetch_policy
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch)
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1))
def __UpperCAmelCase ( self : Dict) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__lowerCAmelCase):
lowercase_ = self.dist_env.copy()
lowercase_ = state_dict_type
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1))
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
lowercase_ = AutoModel.from_pretrained(__lowerCAmelCase)
for policy in FSDP_AUTO_WRAP_POLICY:
lowercase_ = self.dist_env.copy()
lowercase_ = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowercase_ = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
lowercase_ = "2000"
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
lowercase_ = self.dist_env.copy()
lowercase_ = "TRANSFORMER_BASED_WRAP"
lowercase_ = "T5Layer"
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
with self.assertRaises(__lowerCAmelCase) as cm:
fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase)
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
lowercase_ = self.dist_env.copy()
lowercase_ = "SIZE_BASED_WRAP"
lowercase_ = "0"
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase)
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowercase_ = self.dist_env.copy()
lowercase_ = mp_dtype
with mockenv_context(**__lowerCAmelCase):
lowercase_ = Accelerator()
if mp_dtype == "fp16":
lowercase_ = torch.floataa
elif mp_dtype == "bf16":
lowercase_ = torch.bfloataa
lowercase_ = MixedPrecision(param_dtype=__lowerCAmelCase , reduce_dtype=__lowerCAmelCase , buffer_dtype=__lowerCAmelCase)
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __lowerCAmelCase)
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __lowerCAmelCase))
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler)
AcceleratorState._reset_state(__lowerCAmelCase)
def __UpperCAmelCase ( self : List[str]) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowercase_ = self.dist_env.copy()
lowercase_ = str(__lowerCAmelCase).lower()
with mockenv_context(**__lowerCAmelCase):
lowercase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__lowerCAmelCase))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):  # base class assumed: accelerate's TempDirTestCase (provides self.tmpdir)
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            # FSDP_SHARDING_STRATEGY is assumed imported alongside the other FSDP_* constants used below
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
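
# Illustrative expansion of one of the generated commands above (two-GPU host assumed,
# transformer-based wrap config):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#       --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=<tmpdir> --performance_lower_bound=0.82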
| 461 | 1 |
"""simple docstring"""
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spanish_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI/NIF identifier: eight digits followed by a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
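
# Usage sketch: "12345678Z" is the classic valid DNI example, since
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
#   is_spanish_id("12345678Z")   # True
#   is_spanish_id("12345678-Z")  # True, hyphens are stripped before validation
#   is_spanish_id("12345678A")   # False, wrong check letter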
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order, in place, using patience sort."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
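
# Worked example of the pile building above: patience_sort([5, 1, 4, 2, 3]) forms the
# decreasing piles [5, 1], [4, 2] and [3]; reversing each pile gives the ascending runs
# [1, 5], [2, 4], [3], and the heap merge interleaves them into [1, 2, 3, 4, 5].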
| 289 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
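
# Minimal usage sketch of the processor under test (checkpoint name is an assumption;
# any BlipProcessor checkpoint would do):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> dict with pixel_values, input_ids and attention_mask, as asserted above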
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
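
# Effect of the _LazyModule indirection above (behavioral sketch): importing this package
# is cheap, and the heavy torch/flax modules are only imported when a name listed in
# _import_structure is first accessed, e.g.
#   from transformers.models.longt5 import LongT5Config   # no torch import triggered
#   from transformers.models.longt5 import LongT5Model    # torch imported lazily here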
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark a method as the handler for a single key."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark a method as the handler for several keys at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected method if it exists, else returns None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild an existing class with the KeyHandler metaclass applied."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
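
# Hypothetical usage sketch for the helpers above (class name and key are assumptions):
#
#   @register
#   class Menu:
#       @mark("q")
#       def quit(self):
#           return "quit"
#
#   Menu.handle_input(Menu)  # dispatches to `quit` when the "q" key is read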
| 465 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
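
# Instantiation sketch: the defaults above mirror the alibaba-damo/mgp-str-base layout,
# so MgpstrConfig() yields a 768-dim encoder with 12 layers and 12 attention heads.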
| 465 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
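
# Behavioral summary of patch_submodule as exercised above (a sketch, not the datasets
# implementation): within the context manager, every path to the target attribute
# (direct, via a submodule, or via a renamed alias) resolves to the mock through
# _PatchedModuleObj wrappers, and the original objects are restored on exit.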
| 721 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF module names and split off the enc-dec projection."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj (q/k/v target key names reconstructed from the HF attention module)
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # strip the prefix so the sub state dict can be loaded into the projection directly
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
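
# Illustrative invocation (script filename and paths are assumptions):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu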
| 375 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
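
# Usage sketch (checkpoint taken from the pretrained map above):
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok("lower newer").input_ids  # [CLS] + wordpiece ids + [SEP], lowercased per config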
| 202 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):  # class identity inferred from the 384/crop_pct defaults
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
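
# Worked example of the resize logic above: with size={"shortest_edge": 224} and the
# default crop_pct = 224/256, the short side is first resized to int(224 / (224/256)) = 256
# pixels and the image is then center-cropped to 224x224; at shortest_edge >= 384 the
# image is warped directly to (shortest_edge, shortest_edge) with no crop.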
| 202 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
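
# Sketch of the language-code convention tested above: encoding a source string appends
# the eos id (2) followed by the source language id (__python__ == 50002), so every
# input_ids sequence ends with [..., 2, 50002], exactly as the batch tests assert.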
| 226 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime multisets summing to the argument."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes in more than
    `number_unique_partitions` unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
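
# Worked example: partition(10) == {21, 25, 30, 32, 36}, one product per prime multiset
# summing to 10: 3+7 -> 21, 5+5 -> 25, 2+3+5 -> 30, 2+2+2+2+2 -> 32 and 2+2+3+3 -> 36.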
if __name__ == "__main__":
print(F'''{solution() = }''')
| 226 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
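
# Worked example of the fairseq alignment above: spm piece id 3 (",") maps to model id
# 3 + fairseq_offset == 4, while ids 0-3 stay reserved for <s>, <pad>, </s> and <unk>.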
| 422 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Tuple=None , ):
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase : Optional[int] =tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowerCamelCase : str =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowerCamelCase : Dict =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCamelCase : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__snake_case : Tuple = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__snake_case : Optional[Any] = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case : int = True
__snake_case : int = False
__snake_case : Optional[int] = False
__snake_case : Optional[int] = False
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : int =TFLEDModelTester(self )
__lowerCamelCase : Any =ConfigTester(self , config_class=__lowercase )
def __lowercase ( self :Dict ):
self.config_tester.run_common_tests()
def __lowercase ( self :str ):
__lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowercase )
def __lowercase ( self :Optional[Any] ):
__lowerCamelCase , __lowerCamelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : int =tf.zeros_like(inputs_dict['''attention_mask'''] )
__lowerCamelCase : List[Any] =2
__lowerCamelCase : Optional[Any] =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__lowerCamelCase : Tuple =True
__lowerCamelCase : Optional[Any] =self.model_tester.seq_length
__lowerCamelCase : List[str] =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowercase :Any ):
__lowerCamelCase : Union[str, Any] =outputs.decoder_attentions
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowercase :List[str] ):
__lowerCamelCase : Any =[t.numpy() for t in outputs.encoder_attentions]
__lowerCamelCase : Optional[int] =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] =True
__lowerCamelCase : int =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : str =model_class(__lowercase )
__lowerCamelCase : Any =model(self._prepare_for_class(__lowercase , __lowercase ) )
__lowerCamelCase : int =len(__lowercase )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
if self.is_encoder_decoder:
__lowerCamelCase : Optional[int] =model_class(__lowercase )
__lowerCamelCase : Union[str, Any] =model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_decoder_attentions_output(__lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCamelCase : Any =True
__lowerCamelCase : Union[str, Any] =model_class(__lowercase )
__lowerCamelCase : Tuple =model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
# Check attention is always last and order is fine
__lowerCamelCase : Optional[Any] =True
__lowerCamelCase : Optional[int] =True
__lowerCamelCase : Dict =model_class(__lowercase )
__lowerCamelCase : Union[str, Any] =model(self._prepare_for_class(__lowercase , __lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowercase ) )
self.assertEqual(model.config.output_hidden_states , __lowercase )
check_encoder_attentions_output(__lowercase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __lowercase ( self :int ):
pass
def __lowercase ( self :int ):
# TODO: Head-masking not yet implement
pass
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return tf.constant(SCREAMING_SNAKE_CASE , dtype=tf.intaa )
_UpperCamelCase = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :List[str] ):
__lowerCamelCase : Dict =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__lowerCamelCase : List[str] =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__lowerCamelCase : Dict =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__lowerCamelCase : Optional[int] =prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__lowerCamelCase : Dict =model(**__lowercase )[0]
__lowerCamelCase : Optional[int] =(1, 1024, 768)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__lowerCamelCase : Dict =tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 )
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__lowerCamelCase : Union[str, Any] =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__lowerCamelCase : Any =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
__lowerCamelCase : Tuple =prepare_led_inputs_dict(model.config , __lowercase , __lowercase )
__lowerCamelCase : List[Any] =model(**__lowercase )[0]
__lowerCamelCase : str =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__lowerCamelCase : List[str] =tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowercase , atol=1e-3 , rtol=1e-3 )
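# Usage sketch (illustrative, mirrors the integration tests above; the tiny
# config values and token ids are made up): build the model inputs with
# `prepare_led_inputs_dict` and run the bare LED model.
#
# config = LEDConfig(vocab_size=99, d_model=32, attention_window=4)
# model = TFLEDModel(config)
# input_ids = tf.constant([[0, 31414, 232, 2]])
# decoder_input_ids = tf.constant([[1, 0, 31414, 232]])
# inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
# hidden_states = model(**inputs).last_hidden_state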
| 179 | 0 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 ,len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
SCREAMING_SNAKE_CASE__ : List[Any] = grid[0]
for row_n in range(1 ,len(_snake_case ) ):
SCREAMING_SNAKE_CASE__ : int = grid[row_n]
SCREAMING_SNAKE_CASE__ : Optional[int] = fill_row(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = grid[row_n]
return grid[-1][-1]
def lowercase_ ( _snake_case ,_snake_case ):
current_row[0] += row_above[0]
for cell_n in range(1 ,len(_snake_case ) ):
current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
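# Caveat worth noting (not from the original module): `min_path_sum` accumulates
# the running sums directly into `grid`, so pass a copy when the original
# matrix is still needed.
#
# import copy
#
# grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# print(min_path_sum(copy.deepcopy(grid)))  # 7
# print(grid)  # unchanged: [[1, 3, 1], [1, 5, 1], [4, 2, 1]]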
| 545 |
"""simple docstring"""
import baseaa
def lowercase_ ( _snake_case ):
return baseaa.baaencode(string.encode("""utf-8""" ) )
def lowercase_ ( _snake_case ):
return baseaa.baadecode(_snake_case ).decode("""utf-8""" )
if __name__ == "__main__":
UpperCAmelCase__ : Dict = 'Hello World!'
UpperCAmelCase__ : Tuple = baseaa_encode(test)
print(encoded)
UpperCAmelCase__ : Tuple = baseaa_decode(encoded)
print(decoded)
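# Round-trip sketch (illustrative): the encoder returns bytes, and decoding
# restores the original text exactly.
#
# assert base64_encode("Hello World!") == b"SGVsbG8gV29ybGQh"
# assert base64_decode(b"SGVsbG8gV29ybGQh") == "Hello World!"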
| 545 | 1 |
"""Tests for the framework-agnostic tensor helpers in `transformers.utils`."""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
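# Quick illustration (not part of the test file): `flatten_dict` joins nested
# keys with "." exactly as `test_flatten_dict` above asserts.
#
# from transformers.utils import flatten_dict
#
# print(flatten_dict({"a": {"b": {"c": 1}}}))  # {'a.b.c': 1}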
| 173 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
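# Note on the pattern above (explanatory, not from the original file): replacing
# the module in `sys.modules` with a `_LazyModule` defers the heavy imports, so
# a line such as the following only loads `modeling_nezha` at that moment:
#
# from transformers.models.nezha import NezhaModel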
| 173 | 1 |
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
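# Quick illustration (not part of the original script): what `_re_checkpoint`
# extracts from a config docstring. The sample string below is made up.
#
# sample = "Instantiate like [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
# print(_re_checkpoint.findall(sample))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]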
| 709 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge; weight must be 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """
        Return the shortest distance from start_vertex to finish_vertex
        in a graph whose edge weights are all 0 or 1 (0-1 BFS).

        >>> g = AdjacencyList(4)
        >>> g.add_edge(0, 1, 0)
        >>> g.add_edge(1, 2, 1)
        >>> g.add_edge(0, 3, 1)
        >>> g.add_edge(3, 2, 0)
        >>> g.get_shortest_path(0, 2)
        1
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges keep the destination at the current distance
                # level, so it goes to the front of the deque; 1-weight edges
                # go to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
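# Why the deque works (explanatory note, not from the original module): a vertex
# reached over a 0-weight edge has the same distance as the vertex being
# expanded, so pushing it to the front keeps the deque sorted by distance with
# at most two distinct values, the same invariant plain BFS gets from its FIFO
# queue. The whole search therefore runs in O(V + E), avoiding the
# O((V + E) log V) cost of Dijkstra's algorithm with a binary heap.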
| 291 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run through the given directory, doctest-checking every file that matches the identifier."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
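# Minimal sketch (not part of the original file) of the doctest plumbing used in
# `analyze_directory`: `doctest.DocTestSuite` collects the docstring examples of
# an already-imported module into a unittest suite. The module chosen below is
# only an example.
#
# import doctest
# import unittest
#
# import transformers
#
# suite = doctest.DocTestSuite(transformers.modeling_utils)
# result = unittest.TextTestRunner().run(suite)
# assert len(result.failures) == 0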
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a ( lowercase ):
UpperCamelCase : Union[str, Any] = """bert-generation"""
def __init__( self , UpperCamelCase_=50_358 , UpperCamelCase_=1_024 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=4_096 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_="absolute" , UpperCamelCase_=True , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : Dict = use_cache
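# Usage sketch (illustrative): instantiating the config with its defaults and
# using it to build the matching encoder; `BertGenerationEncoder` is the model
# class this config drives in `transformers`.
#
# from transformers import BertGenerationConfig, BertGenerationEncoder
#
# configuration = BertGenerationConfig()
# model = BertGenerationEncoder(configuration)
# print(configuration.hidden_size)  # 1024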
| 110 | 0 |
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )


class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it also needs `next_sentence_label` when labels are requested
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 706 |
"""Disjoint set (union-find) with union by rank and path compression."""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic;
        return True if they were merged, False if already in the same set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the parent of a given set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
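# Usage sketch (illustrative, not from the original module): three singleton
# sets are merged into one; `max_set` tracks the largest set size seen so far.
#
# ds = DisjointSet([1, 1, 1])
# ds.merge(1, 2)           # True, set sizes become [1, 0, 2]
# ds.merge(0, 2)           # True, set sizes become [0, 0, 3]
# print(ds.get_parent(0))  # 2
# print(ds.max_set)        # 3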
| 304 | 0 |