| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 – 53.2k) | int64 (0 – 721) | string (lengths 91 – 41.9k) | int64 (0 – 699) | int64 (0 – 1) |
import os
import sys

# Make the local `src` directory importable before pulling in transformers.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Load a configuration via `AutoConfig.from_pretrained`."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Load a tokenizer via `AutoTokenizer.from_pretrained`."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Load a bare model via `AutoModel.from_pretrained`."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Load a causal language model via `AutoModelForCausalLM.from_pretrained`."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Load a masked language model via `AutoModelForMaskedLM.from_pretrained`."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Load a sequence-classification model via `AutoModelForSequenceClassification.from_pretrained`."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Load a question-answering model via `AutoModelForQuestionAnswering.from_pretrained`."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
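Since each entry point simply forwards to the matching `Auto*` factory, they can be exercised directly; a minimal sketch with an illustrative checkpoint name (not taken from this file):

```python
# Minimal usage sketch for the entry points above; "bert-base-uncased" is illustrative.
cfg = config("bert-base-uncased")      # AutoConfig.from_pretrained
tok = tokenizer("bert-base-uncased")   # AutoTokenizer.from_pretrained
net = model("bert-base-uncased")       # AutoModel.from_pretrained
print(cfg.hidden_size, tok.tokenize("Hello world!"))
```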
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join(x + "\n" for x in vocab_tokens))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
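The token-type test encodes a Funnel-specific convention: `<cls>` carries its own token type (2), while the segments use 0 and 1. A hedged, standalone sketch of the same check (the vocabulary path is hypothetical, standing in for the file written in `setUp`):

```python
# Hedged sketch of the token_type_ids convention tested above.
tok = FunnelTokenizer("path/to/vocab.txt")      # hypothetical vocab path
enc = tok("UNwant\u00E9d,running")
assert enc["token_type_ids"][0] == 2            # <cls> carries token type 2
assert set(enc["token_type_ids"][1:]) == {0}    # the single segment is type 0
```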
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
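The helpers exercised above can also be used directly; a minimal sketch (requires network access to the Hugging Face Hub):

```python
from datasets import get_dataset_config_names, get_dataset_split_names

# Discover the configurations of a Hub dataset, then the splits of one of them.
configs = get_dataset_config_names("squad")                        # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", config_name=configs[0])  # e.g. ["train", "validation"]
print(configs, splits)
```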
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE: the original class name was lost to identifier obfuscation; a generic name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge of the image matches `size["shortest_edge"]`.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
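A stand-alone usage sketch of the pipeline implemented by `preprocess` above, with a random array standing in for a real image:

```python
import numpy as np

# Hypothetical usage of the processor defined above.
processor = ImageProcessor()
dummy = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
batch = processor.preprocess(dummy, return_tensors="np")
# resize shortest edge -> 256, center crop -> 224x224, channels-first output
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```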
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "bert-generation"
def __init__( self : Any , _lowercase : List[Any]=5_03_58 , _lowercase : str=10_24 , _lowercase : str=24 , _lowercase : int=16 , _lowercase : Any=40_96 , _lowercase : Union[str, Any]="gelu" , _lowercase : str=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=0.02 , _lowercase : Dict=1E-12 , _lowercase : int=0 , _lowercase : Optional[int]=2 , _lowercase : Dict=1 , _lowercase : Dict="absolute" , _lowercase : Tuple=True , **_lowercase : str , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
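A quick instantiation sketch for the config above (the overridden sizes are illustrative):

```python
config = BertGenerationConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
print(config.model_type, config.hidden_size, config.use_cache)  # bert-generation 128 True
```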
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't.
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> np.array:
'''simple docstring'''
lowercase_ = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ = np.zeros((n + 1,) )
lowercase_ = ya
lowercase_ = xa
for k in range(__lowerCAmelCase ):
lowercase_ = y[k] + step_size * ode_func(__lowerCAmelCase , y[k] )
lowercase_ = y[k] + (
(step_size / 2) * (ode_func(__lowerCAmelCase , y[k] ) + ode_func(x + step_size , __lowerCAmelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
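A quick numerical check of `euler_modified` on dy/dx = y with y(0) = 1, whose exact solution is e^x:

```python
import numpy as np

# Solve dy/dx = y on [0, 1]; the exact solution at x = 1 is e.
ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.1, x_end=1.0)
print(ys[-1], np.e)  # ~2.7141 vs 2.71828...; the O(h^2) method lands close
```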
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
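The final line swaps the module object for a `_LazyModule`, deferring the heavy torch-backed import until an attribute is first touched. A minimal sketch of that pattern in plain Python (illustrative names, not the transformers implementation):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```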
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
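The checker can also be driven programmatically; a one-line sketch with an illustrative path:

```python
# `strict=False` prints incompatible ops instead of raising; the path is illustrative.
onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)
```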
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral.

    The four corners of the ring with odd side length 2i + 1 are
    (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to
    4*(2i+1)^2 - 12i = 4*odd**2 - 6*even with odd = 2i + 1 and even = 2i.
    """
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
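Two quick sanity checks on small spirals (the diagonals of the 5 x 5 spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25):

```python
assert solution(3) == 25   # 1 + 3 + 5 + 7 + 9
assert solution(5) == 101  # 25 + (13 + 17 + 21 + 25)
print(solution())          # answer for the default 1001 x 1001 spiral
```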
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head over the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        flat_params = traverse_util.flatten_dict(params)
        # Decay everything except biases and LayerNorm scales (keyed by parameter path).
        mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
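A rough wiring sketch of the pieces above (dataset loading and `wandb.init` are omitted; the pad id and step count are illustrative assumptions):

```python
# Rough wiring sketch; assumes `tr_dataset` / `val_dataset` are already loaded.
args = Args()
model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
tx, lr = build_tx(args.lr, args.init_lr, args.warmup_steps, num_train_steps=10_000, weight_decay=args.weight_decay)
collator = DataCollator(pad_id=0, max_length=4096)  # pad_id illustrative; use the tokenizer's pad token id
trainer = Trainer(args, collator, train_step, val_step, model.save_pretrained, logger=wandb, scheduler_fn=lr)
state = trainer.create_state(model, tx, num_train_steps=10_000)
# trainer.train(state, tr_dataset, val_dataset)
```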
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
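A short usage sketch of the `Matrix` class:

```python
m = Matrix([[1, 2], [3, 4]])
print(m.determinant())   # -2
print((m * m).rows)      # [[7, 10], [15, 22]]
print((m ** 0).rows)     # identity: [[1, 0], [0, 1]]
```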
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
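Called programmatically, the conversion takes the same three arguments as the CLI flags (the paths below are illustrative placeholders):

```python
# Paths are illustrative placeholders, not values from this script.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="bert_model.ckpt",
    bert_config_file="bert_config.json",
    pytorch_dump_path="pytorch_model.bin",
)
```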
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
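A quick instantiation sketch (the overridden sizes are illustrative):

```python
config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=2)
print(config.model_type, config.hidden_size)  # megatron-bert 128
```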
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
# copy relevant files
copyfile(a__ , os.path.join(a__ , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(a__ , a__ ) , """w""" ) as f:
f.write("""{}""" )
A = AutoProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def _UpperCAmelCase ( self ) -> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a__ ):
A = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
A = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=a__ )
A = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=a__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
A = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
A = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=a__ , use_fast=a__ )
A = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , a__ )
AutoFeatureExtractor.register(a__ , a__ )
AutoTokenizer.register(a__ , slow_tokenizer_class=a__ )
AutoProcessor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoProcessor.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A = CustomTokenizer(a__ )
A = CustomProcessor(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a__ )
A = AutoProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Any:
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = False
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = False
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = 'AutoFeatureExtractor'
lowerCAmelCase = 'AutoTokenizer'
lowerCAmelCase = False
try:
AutoConfig.register("""custom""" , a__ )
AutoFeatureExtractor.register(a__ , a__ )
AutoTokenizer.register(a__ , slow_tokenizer_class=a__ )
AutoProcessor.register(a__ , a__ )
# If remote code is not set, the default is to use local classes.
A = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=a__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=a__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Dict:
A = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _UpperCAmelCase ( self ) -> str:
A = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def _UpperCAmelCase ( cls ) -> str:
A = TOKEN
HfFolder.save_token(a__ )
@classmethod
def _UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _UpperCAmelCase ( self ) -> Any:
A = WavaVecaProcessor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a__ , """test-processor""" ) , push_to_hub=a__ , use_auth_token=self._token )
A = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a__ , getattr(new_processor.feature_extractor , a__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ) -> Optional[int]:
A = WavaVecaProcessor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a__ , """test-processor-org""" ) , push_to_hub=a__ , use_auth_token=self._token , organization="""valid_org""" , )
A = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a__ , getattr(new_processor.feature_extractor , a__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ) -> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(a__ , """vocab.txt""" )
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A = CustomTokenizer(a__ )
A = CustomProcessor(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'{USER}/test-dynamic-processor' , token=self._token )
A = Repository(a__ , clone_from=f'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(a__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a__ , """tokenizer_config.json""" ) ) as f:
A = json.load(a__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
A = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 700 |
import math
def is_prime( number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
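# Why the step of 6 above suffices: every prime p > 3 satisfies p % 6 in {1, 5},
# so testing i and i + 2 for i = 5, 11, 17, ... covers every candidate divisor
# up to sqrt(number).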
def solution( ratio: float = 0.1 ) -> int:
    """simple docstring"""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
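# Worked example: at side length j = 3 the spiral diagonals hold {1, 3, 5, 7, 9};
# three of those five values (3, 5 and 7) are prime, so the starting ratio is
# 3 / (2 * 3 - 1) = 0.6, well above the default target of 0.1.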
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
SCREAMING_SNAKE_CASE_ = F"""https://www.google.com/search?q={query}&num=100"""
SCREAMING_SNAKE_CASE_ = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link) | 237 |
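# Note on the encoding above (illustrative): urllib.parse.quote percent-encodes
# unsafe characters, e.g. quote("machine learning") == "machine%20learning",
# matching the "%20" join applied to command-line arguments.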
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""DPTFeatureExtractor"""]
SCREAMING_SNAKE_CASE_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase : Any ={"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 504 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict =logging.get_logger(__name__)
class _lowercase (BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
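# Pipeline note (numbers are the defaults above): preprocess converts each image
# to a numpy array, resizes the shortest edge to 256 (a 640x480 input becomes
# roughly 341x256), center-crops to 224x224, rescales by 1/255, normalizes with
# the ImageNet mean/std and returns channel-first "pixel_values" in a BatchFeature.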
| 504 | 1 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans( dataset ):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
    return qid_to_has_ans
def normalize_answer( s ):
    '''simple docstring'''
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens( s ):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact( a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa( a_gold , a_pred ):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
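# Worked example for the token-level F1 above: gold = "the cat sat" and
# pred = "the cat" share two tokens, so precision = 2/2 = 1.0, recall = 2/3,
# and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.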
def get_raw_scores( dataset , preds ):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(F"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict( exact_scores , fa_scores , qid_list=None ):
'''simple docstring'''
if not qid_list:
        total = len(exact_scores )
return collections.OrderedDict(
[
                ('exact', 100.0 * sum(exact_scores.values() ) / total),
                ('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
        total = len(qid_list )
return collections.OrderedDict(
[
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def merge_eval( main_eval , new_eval , prefix ):
    '''simple docstring'''
    for k in new_eval:
        main_eval[F"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve( precisions , recalls , out_image , title ):
    '''simple docstring'''
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
    plt.xlabel('Recall' )
    plt.ylabel('Precision' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    '''simple docstring'''
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis( main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
    merge_eval(main_eval , pr_exact , 'pr_exact' )
    merge_eval(main_eval , pr_fa , 'pr_f1' )
    merge_eval(main_eval , pr_oracle , 'pr_oracle' )
def histogram_na_prob( na_probs , qid_list , image_dir , name ):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel('Model probability of no-answer' )
    plt.ylabel('Proportion of dataset' )
    plt.title(F"""Histogram of no-answer probability: {name}""" )
    plt.savefig(os.path.join(image_dir , F"""na_prob_hist_{name}.png""" ) )
    plt.clf()
def find_best_thresh( preds , scores , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_fa
    main_eval['best_f1_thresh'] = fa_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
    dataset = dataset_json['data']
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , 'HasAns' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , 'NoAns' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
    if OPTS.out_file:
        with open(OPTS.out_file , 'w' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
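# Example invocation (script and file names are illustrative):
#   python evaluate_squad_v2.py data.json pred.json --na-prob-file na_prob.json -o eval.json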
| 265 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 265 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
"""simple docstring"""
def is_palindrome( num: int ) -> bool:
    return str(num ) == str(num )[::-1]


def sum_reverse( num: int ) -> int:
    return int(num ) + int(str(num )[::-1] )


def solution( limit: int = 10_000 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
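# Worked example: 47 drops out after one iteration because 47 + 74 = 121 is a
# palindrome, while 196 is the classic candidate that never produces a
# palindrome within the 50-iteration cap and is therefore counted as Lychrel.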
if __name__ == "__main__":
print(f'''{solution() = }''')
| 293 | 0 |
'''simple docstring'''
import itertools
import math
def is_prime( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution( nth: int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , None ) )
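# Quick sanity check: the sixth prime is 13, so solution(6) == 13; the default
# call solution() returns the 10_001st prime (Project Euler problem 7).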
if __name__ == "__main__":
print(f"{solution() = }")
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config( swinva_name ):
    config = SwinvaConfig()
    name_split = swinva_name.split("""_""" )

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key( name ):
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("""head""" , """classifier""" )
    else:
        name = "swinv2." + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
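# Shape note for the qkv split above (illustrative, dim = 96): timm stores a
# fused qkv.weight of shape (3 * 96, 96) = (288, 96); rows [0:96] become the
# query projection, [96:192] the key and [192:288] the value.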
def convert_swinva_checkpoint( swinva_name , pytorch_dump_folder_path ):
    timm_model = timm.create_model(swinva_name , pretrained=True )
    timm_model.eval()

    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="""pt""" )

    timm_outs = timm_model(inputs["""pixel_values"""] )
    hf_outs = model(**inputs ).logits

    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )

    print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )

    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinva_name ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
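# Example invocation (paths are illustrative):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny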
| 703 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36 | 0 |
def sum_of_digits( n: int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion( n: int ) -> int:
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )


def sum_of_digits_compact( n: int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='import __main__' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
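# Quick sanity check for the three implementations above (2**18 = 262_144 has
# digit sum 2 + 6 + 2 + 1 + 4 + 4 = 19):
assert sum_of_digits(262_144) == sum_of_digits_recursion(262_144) == sum_of_digits_compact(262_144) == 19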
| 23 | from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowercase : List[Any] =logging.get_logger(__name__)
_lowercase : str ={
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig( PretrainedConfig ):
'''simple docstring'''
lowercase : Optional[int] = "marian"
lowercase : Dict = ["past_key_values"]
lowercase : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=58_101 , decoder_vocab_size=None , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , activation_dropout=0.0 , attention_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58_100 , scale_embedding=False , pad_token_id=58_100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class MarianOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
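# Usage sketch (added for illustration; `MarianOnnxConfig` is a stand-in for
# whatever concrete config class wraps the methods above -- adjust to the
# actual class name in this module):
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, opset=13, output=Path("model.onnx"))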
| 305 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b) -> bool:
    # Compare two TensorProtos while ignoring their names: blank the names,
    # compare, then restore them.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    # "If" and "Loop" nodes carry subgraphs that must be rewritten as well.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Remove the duplicated initializer and redirect every node input that
        # referenced it to the kept one.
        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size and
    saves an optimized copy next to the original file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Translate the ONNX data type into a per-element byte size.
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
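# Usage sketch (added): point the function at a saved ONNX file; it writes an
# "optimized_<name>" copy next to the original and returns the new file path.
#
#   new_model_path = remove_dup_initializers("exported/model.onnx")
#   # -> "exported/optimized_model.onnx"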
| 718 |
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    """
    Project Euler 23: return the sum of all positive integers <= limit that
    cannot be written as the sum of two abundant numbers.
    """
    # sum_divs[n] accumulates the sum of the proper divisors of n (seeded with 1).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a) in abundants for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
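# Sanity check of the divisor sieve (added for illustration, not part of the
# original solution): 12 is the smallest abundant number, since its proper
# divisors 1 + 2 + 3 + 4 + 6 = 16 exceed 12.
def _sum_proper_divisors(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)


assert _sum_proper_divisors(12) == 16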
| 100 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear module with a low-rank (LoRA-style) adapter; used only for training tests."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
# Models and tokenizer
_a : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
_a : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
def __lowercase ( self ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__lowercase , '''quantization_config''' ) )
_a : Optional[int] = config.to_dict()
_a : Tuple = config.to_diff_dict()
_a : Optional[int] = config.to_json_string()
def __lowercase ( self ) -> Dict:
from bitsandbytes.nn import Paramsabit
_a : List[str] = self.model_fpaa.get_memory_footprint()
_a : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_a : str = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __lowercase ( self ) -> Optional[int]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__lowercase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __lowercase ( self ) -> str:
_a : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' )
_a : Optional[Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowercase ) , self.EXPECTED_OUTPUTS )
def __lowercase ( self ) -> Tuple:
_a : Dict = BitsAndBytesConfig()
_a : Dict = True
_a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowercase , device_map='''auto''' )
_a : List[str] = self.tokenizer(self.input_text , return_tensors='''pt''' )
_a : Any = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowercase ) , self.EXPECTED_OUTPUTS )
def __lowercase ( self ) -> Tuple:
with self.assertRaises(__lowercase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__lowercase )
def __lowercase ( self ) -> Any:
_a : List[Any] = BitsAndBytesConfig()
with self.assertRaises(__lowercase ):
_a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__lowercase , load_in_abit=__lowercase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __lowercase ( self ) -> str:
with self.assertRaises(__lowercase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__lowercase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__lowercase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
_a : Optional[int] = self.model_fpaa.to(torch.floataa )
_a : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
_a : List[Any] = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
_a : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
_a : Dict = self.model_fpaa.float()
def __lowercase ( self ) -> int:
_a : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__lowercase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = 'Translate in German: Hello, my dog is cute'
def __lowercase ( self ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Any:
from transformers import TaForConditionalGeneration
_a : Dict = TaForConditionalGeneration._keep_in_fpaa_modules
_a : Any = None
# test with `t5-small`
_a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
_a : List[str] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
_a : List[str] = model.generate(**__lowercase )
# test with `flan-t5-small`
_a : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__lowercase , device_map='''auto''' )
_a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
_a : List[Any] = model.generate(**__lowercase )
_a : Tuple = modules
def __lowercase ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_a : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_a : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
_a : Optional[Any] = model.generate(**__lowercase )
# test with `flan-t5-small`
_a : Optional[int] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__lowercase , device_map='''auto''' )
_a : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
_a : List[str] = model.generate(**__lowercase )
class Classes4BitModelTest(Base4bitTest):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
super().setUp()
# model_name
_a : Optional[int] = '''bigscience/bloom-560m'''
_a : int = '''t5-small'''
# Different types of model
_a : Any = AutoModel.from_pretrained(self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
# Sequence classification model
_a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
# CausalLM model
_a : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowercase , device_map='''auto''' )
# Seq2seq model
_a : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__lowercase , device_map='''auto''' )
def __lowercase ( self ) -> Optional[int]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
super().setUp()
def __lowercase ( self ) -> Optional[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_a : Optional[Any] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
"""simple docstring"""
def __lowercase ( self ) -> List[str]:
super().setUp()
def __lowercase ( self ) -> List[str]:
_a : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__lowercase , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_a : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
_a : Optional[int] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__lowercase ) , self.EXPECTED_OUTPUTS )
class Bnb4bitTestTraining(Base4bitTest):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Tuple = '''facebook/opt-350m'''
super().setUp()
def __lowercase ( self ) -> Any:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
_a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowercase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_a : List[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_a : str = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__lowercase ) ):
_a : Tuple = LoRALayer(module.q_proj , rank=1_6 )
_a : Optional[int] = LoRALayer(module.k_proj , rank=1_6 )
_a : Optional[Any] = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
_a : str = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_a : int = model.forward(**__lowercase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__lowercase , __lowercase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__lowercase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
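# Minimal 4-bit load (added for reference; the public API flag is spelled
# `load_in_4bit` -- requires a CUDA device plus the accelerate and
# bitsandbytes packages):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#   )
#   inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
#   print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))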
| 14 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """
    Brute-force a Caesar cipher by printing the decryption under every
    possible key.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
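# Worked example (added for illustration): shifting "HELLO" forward by 3 gives
# "KHOOR", and the brute-force loop above recovers it at key #3.
def _caesar_encrypt(message: str, key: int) -> str:
    return ''.join(
        string.ascii_uppercase[(string.ascii_uppercase.find(c) + key) % 26]
        if c in string.ascii_uppercase
        else c
        for c in message
    )


assert _caesar_encrypt('HELLO', 3) == 'KHOOR'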
| 119 | 0 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
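# Minimal reference implementation of the imported `kruskal` (added as a
# sketch, assuming it takes (num_nodes, edges) with edges as [u, v, weight]
# lists and returns the MST edge list; the real module may differ in details).
def kruskal_sketch(num_nodes, edges):
    edges = sorted(edges, key=lambda e: e[2])  # process edges by ascending weight
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst = []
    for u, v, w in edges:
        ru, rv = find(u), find(v)
        if ru != rv:  # adding this edge creates no cycle
            parent[ru] = rv
            mst.append([u, v, w])
    return mst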
| 710 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def A_ ( UpperCAmelCase__ = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A_ ( UpperCAmelCase__ = "" ) -> bool:
if len(UpperCAmelCase__ ) == 0:
return True
a : Union[str, Any] = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
a : dict[str, int] = {}
for character in lower_case_input_str:
a : Optional[Any] = character_freq_dict.get(UpperCAmelCase__ , 0 ) + 1
a : Any = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A_ ( UpperCAmelCase__ = "" ) -> None:
print('\nFor string = ' , UpperCAmelCase__ , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(UpperCAmelCase__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(UpperCAmelCase__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__ : List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
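# Worked example (added for illustration): 'carrace' has character counts
# {c: 2, a: 2, r: 2, e: 1}; only one count is odd, so it can be rearranged
# into the palindrome 'racecar', while 'hello' (odd counts for h, e, o) cannot.
#
#   >>> can_string_be_rearranged_as_palindrome('carrace')
#   True
#   >>> can_string_be_rearranged_as_palindrome('hello')
#   False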
| 509 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
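# Example invocation (added; the command is exposed through the
# `transformers-cli` entry point in the real package):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force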
| 191 |
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        # Zero capacity (or zero-value items) yields no value.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
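# Reference sketch of the imported `k.knapsack` (added, assuming the signature
# knapsack(capacity, weights, values, counter) returning the best 0/1-knapsack
# value; the real module may memoize or iterate instead).
def knapsack_sketch(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    # An item heavier than the remaining capacity cannot be taken.
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1]
        + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )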
| 191 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset'
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10}
        )
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')

        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
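# Usage sketch (added for illustration; not executed as part of the tests):
#
#   from transformers import pipeline
#
#   clf = pipeline(
#       "video-classification",
#       model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#   )
#   clf("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]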
| 710 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
UpperCamelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = self.get_dummy_inputs()
UpperCamelCase_ = pipe(**__UpperCAmelCase ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ort.SessionOptions()
UpperCamelCase_ = False
return options
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase_ = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = 'A fantasy landscape, trending on artstation'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase_ = init_image.resize((128, 128) )
UpperCamelCase_ = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
UpperCamelCase_ = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCamelCase_ = 'A fantasy landscape, trending on artstation'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCAmelCase , output_type='np' , )
UpperCamelCase_ = output.images
UpperCamelCase_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase_ = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 559 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowerCamelCase :
"""simple docstring"""
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
lowerCAmelCase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowerCAmelCase__ = cfg.INPUT.FORMAT
lowerCAmelCase__ = cfg.SIZE_DIVISIBILITY
lowerCAmelCase__ = cfg.PAD_VALUE
lowerCAmelCase__ = cfg.INPUT.MAX_SIZE_TEST
lowerCAmelCase__ = cfg.MODEL.DEVICE
lowerCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowerCAmelCase__ = lambda SCREAMING_SNAKE_CASE__ : (x - self.pixel_mean) / self.pixel_std
def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> Any:
lowerCAmelCase__ = tuple(max(SCREAMING_SNAKE_CASE__ ) for s in zip(*[img.shape for img in images] ) )
lowerCAmelCase__ = [im.shape[-2:] for im in images]
lowerCAmelCase__ = [
nn.functional.pad(
SCREAMING_SNAKE_CASE__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return torch.stack(SCREAMING_SNAKE_CASE__ ), torch.tensor(SCREAMING_SNAKE_CASE__ )
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any=False ) -> Dict:
with torch.no_grad():
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = [images]
if single_image:
assert len(SCREAMING_SNAKE_CASE__ ) == 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(SCREAMING_SNAKE_CASE__ , images.pop(SCREAMING_SNAKE_CASE__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
SCREAMING_SNAKE_CASE__ , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowerCAmelCase__ = torch.tensor([im.shape[:2] for im in images] )
lowerCAmelCase__ = self.aug(SCREAMING_SNAKE_CASE__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowerCAmelCase__ = [self.normalizer(SCREAMING_SNAKE_CASE__ ) for x in images]
# now pad them to do the following operations
lowerCAmelCase__ , lowerCAmelCase__ = self.pad(SCREAMING_SNAKE_CASE__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowerCAmelCase__ = torch.true_divide(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
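# Worked example of the short-edge scaling arithmetic above (added for
# illustration; mirrors ResizeShortestEdge.__call__ without the tensor ops):
def _resize_shape(h: int, w: int, size: int, max_size: int):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)


# A 480x640 frame with target short edge 800 and max_size 1333 becomes 800x1067.
assert _resize_shape(480, 640, 800, 1333) == (800, 1067)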
| 61 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: a 0x80 byte, zeros, then the bit length as a big-endian 64-bit integer."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
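# Quick self-check (added for illustration): the class above should agree with
# hashlib on any input.
#
#   >>> import hashlib
#   >>> SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#   True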
| 409 | 0 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 469 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['past_key_values']

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 469 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    # At the bottom of the tree, return the leaf's score.
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
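# Worked example (added): for the scores above, height = log2(8) = 3. The
# depth-2 maximizers pick 90, 33, 65, 34423 from the leaf pairs; the depth-1
# minimizers reduce those to 33 and 65; the root maximizer picks 65.
#
#   >>> minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3)
#   65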
| 187 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        """simple docstring"""
        return 1_00
@property
    def dummy_tokenizer( self ):
"""simple docstring"""
snake_case_ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
    @property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
        scheduler = DDIMScheduler(**ddim_config )
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((2_56, 2_56) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
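    # Note: with strength=0.2 and num_inference_steps=10 the img2img schedule keeps
    # roughly int(10 * 0.2) = 2 denoising steps, so this fast test does very little
    # diffusion work by design (a hedged reading of the strength parameter).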
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_embeds , zero_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_embeds , negative_image_embeds=zero_image_embeds , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
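# To run only the fast test above locally (a usage sketch; the file path is
# hypothetical):
#   pytest tests/pipelines/kandinsky/test_kandinsky_img2img.py -k test_kandinsky_img2img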
| 187 | 1 |
'''
Percentage-proximity scoring: min-max normalise every column of a data matrix
(weight 1 favours large values, weight 0 favours small ones), sum the per-row
scores, and append the score to each row.
'''
def get_data(source_data: list[list[float]] ) -> list[list[float]]:
    '''Transpose the rows of ``source_data`` into per-column lists of floats.'''
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score(data_lists: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    '''Min-max normalise each column, inverting columns whose weight is 0.'''
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores(score_lists: list[list[float]] ) -> list[float]:
    '''Sum the per-column scores into one final score per row.'''
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    '''Score ``source_data`` with ``weights`` and append the score to each row.'''
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
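# Worked example (hypothetical data; weight 0 rewards small values, weight 1
# rewards large ones):
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   procentual_proximity(vehicles, [0, 0, 1])
# appends the scores 2.0, 1.0 and ~1.33 to the three rows respectively.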
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
    def prepare_config_and_inputs( self) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Optional[Any]:
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
    def prepare_config_and_inputs_for_common( self) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self) -> None:
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=3_7)
    def test_config( self) -> None:
        self.config_tester.run_common_tests()
    def test_model( self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings( self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling( self , scaling_type) -> None:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5))
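        # A hedged note on the scaling config used above: `factor` multiplies the
        # supported context length, so {"type": "linear", "factor": 10.0} on a model
        # trained for 2048 positions targets roughly 20480 positions.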
| 676 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        audio_classifier = AudioClassificationPipeline(model=model ,feature_extractor=processor )
        # test with a raw waveform
        audio = np.zeros((3_4_0_0_0,) )
        audioa = np.zeros((1_4_0_0_0,) )
        return audio_classifier, [audioa, audio]
    def run_pipeline_test( self ,audio_classifier ,examples ):
        audioa , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output ,[
                {'score': ANY(float ), 'label': ANY(str )},
                {'score': ANY(float ), 'label': ANY(str )},
            ] ,)
        output = audio_classifier(audio ,top_k=1 )
        self.assertEqual(
            output ,[
                {'score': ANY(float ), 'label': ANY(str )},
            ] ,)
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self ,audio_classifier ):
        import datasets
        # test with a local file
        dataset = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation')
        audio = dataset[0]['audio']['array']
        output = audio_classifier(audio )
        self.assertEqual(
            output ,[
                {'score': ANY(float ), 'label': ANY(str )},
                {'score': ANY(float ), 'label': ANY(str )},
            ] ,)
@require_torch
    def test_small_model_pt( self ):
        model = 'anton-l/wav2vec2-random-tiny-classifier'
        audio_classifier = pipeline('audio-classification' ,model=model)
        audio = np.ones((8_0_0_0,))
        output = audio_classifier(audio ,top_k=4)
        EXPECTED_OUTPUT = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(output ,decimals=4) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'array': np.ones((8_0_0_0,)), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict ,top_k=4)
        self.assertIn(nested_simplify(output ,decimals=4) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt( self ):
        import datasets
        model = 'superb/wav2vec2-base-superb-ks'
        audio_classifier = pipeline('audio-classification' ,model=model)
        dataset = datasets.load_dataset('anton-l/superb_dummy' ,'ks' ,split='test')
        audio = np.array(dataset[3]['speech'] ,dtype=np.float32)
        output = audio_classifier(audio ,top_k=4)
        self.assertEqual(
            nested_simplify(output ,decimals=3) ,[
{'score': 0.981, 'label': 'go'},
{'score': 0.007, 'label': 'up'},
{'score': 0.006, 'label': '_unknown_'},
{'score': 0.001, 'label': 'down'},
] ,)
@require_tf
@unittest.skip('Audio classification is not implemented for TF')
    def test_small_model_tf( self ):
pass
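# Outside the test harness, the same pipeline can be used directly (a minimal
# sketch; the model name matches the slow test above):
#   classifier = pipeline('audio-classification', model='superb/wav2vec2-base-superb-ks')
#   classifier(np.zeros((16_000,), dtype=np.float32), top_k=2)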
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples( unittest.TestCase ):
    def analyze_directory( self ,directory: Path ,identifier: Union[str, None] = None ,n_identifier: Union[List[str], None] = None ,ignore_files: Union[str, List[str], None] = None ,only_modules: bool = True ,):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory ,file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier ,list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers ,module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) ,0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file) ,optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed ,0)
    def test_modeling_files_are_tested( self ):
        module_dir = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(module_dir ,identifier=identifier ,ignore_files=ignore_files)
    def test_tokenization_files_are_tested( self ):
        module_dir = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(module_dir ,identifier=identifier)
    def test_configuration_files_are_tested( self ):
        module_dir = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(module_dir ,identifier=identifier)
    def test_remaining_files_are_tested( self ):
        module_dir = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(module_dir ,n_identifier=n_identifiers)
    def test_doc_files_are_tested( self ):
        doc_dir = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_dir ,ignore_files=ignore_files ,only_modules=False)
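    # For reference, a minimal docstring that DocTestSuite would execute looks
    # like this (illustrative only, not part of the original file):
    #   def add(a, b):
    #       """
    #       >>> add(1, 2)
    #       3
    #       """
    #       return a + b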
| 652 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[8, 1_6, 3_2, 6_4] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self) -> Optional[int]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels) -> None:
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels) -> None:
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels) -> None:
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self) -> None:
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
    def test_config( self) -> None:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self) -> None:
        return
@unittest.skip(reason='Bit does not output attentions')
def _a ( self) -> str:
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def _a ( self) -> List[Any]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def _a ( self) -> Dict:
pass
def _a ( self) -> Dict:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
__snake_case = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
    def test_model( self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def _a ( self) -> Any:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(config=lowercase_)
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
def _a ( self) -> str:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_))
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case = layer_type
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
@unittest.skip(reason='Bit does not use feedforward chunking')
def _a ( self) -> Union[str, Any]:
pass
    def test_for_image_classification( self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def _a ( self) -> List[str]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = BitModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def prepare_img() -> Image.Image:
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self) -> None:
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class __lowercase ( lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
__UpperCAmelCase = BitConfig
__UpperCAmelCase = False
def _a ( self) -> List[Any]:
__snake_case = BitModelTester(self)
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url: str ) -> Swin2SRConfig:
    '''Infer a Swin2SRConfig from the original checkpoint URL.'''
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name , config ):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print('Looks ok!' )
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}" )
        processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
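# Background note on the "pixelshuffle" upsamplers referenced above (a hedged
# aside, not part of the original script): torch.nn.PixelShuffle(r) rearranges a
# tensor of shape (C*r*r, H, W) into (C, H*r, W*r), e.g. PixelShuffle(2) maps a
# (48, 64, 64) feature map to (12, 128, 128).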
| 676 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ) -> float:
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
return results
def _mp_fn(index ) -> None:
    # For xla_spawn (TPUs)
    main()
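# CLI sketch (paths and task hypothetical):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-cased \
#     --data_dir ./data/swag --output_dir ./out --do_train --do_eval --max_seq_length 128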
if __name__ == "__main__":
main() | 458 |
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
    for i in total_list:
        print(*i )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
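    # Worked check: for n = 4 and k = 2 the script prints the six 2-combinations
    # 1 2, 1 3, 1 4, 2 3, 2 4 and 3 4.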
| 300 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 705 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE ="""%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__SCREAMING_SNAKE_CASE =f"""https://www.google.com/search?q={query}&num=100"""
__SCREAMING_SNAKE_CASE =requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__SCREAMING_SNAKE_CASE =(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__SCREAMING_SNAKE_CASE =parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 89 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
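# Contract assumed by xmp.spawn above: the launched training script must expose
# a module-level `_mp_fn(index)` entry point (index is the local TPU ordinal), e.g.
#
#     def _mp_fn(index):
#         main()  # the script's regular entry point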
if __name__ == "__main__":
    main()
 | 34 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
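    # Expected output of the call above (first and last lines):
    #   5 * 1 = 5
    #   ...
    #   5 * 10 = 50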
| 472 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of `FlaxLogitsProcessor` or `FlaxLogitsWarper` objects that are applied
    in order to a `scores` input tensor.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that enforces a minimum length by masking the EOS token."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor implementing Whisper's timestamp sampling rules."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
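# Minimal usage sketch (illustrative shapes): chain a temperature warper with a
# top-k warper the way a sampling loop would, then apply them to dummy logits.
def _demo_logits_processing():
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    input_ids = jnp.zeros((2, 4), dtype=jnp.int32)  # (batch_size, cur_len)
    scores = jnp.zeros((2, 100))  # (batch_size, vocab_size)
    return processors(input_ids, scores, cur_len=4)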
| 3 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
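# Minimal usage sketch: a deliberately tiny configuration (sizes are
# illustrative, not a released checkpoint):
#   config = Data2VecTextConfig(vocab_size=1_000, hidden_size=64, num_hidden_layers=2)
#   assert config.model_type == "data2vec-text"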
| 3 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
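# Example invocation (paths are illustrative):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./stable-diffusion-v1-5 --half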
| 693 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
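# Complexity note: insert_nth/delete_nth walk from the head, so they are O(n);
# insert_head/delete_head are O(1). insert_tail is also O(n) here because it
# delegates to insert_nth(len(self), ...); a tail pointer would make it O(1).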
def test_singly_linked_list() -> None:
    """Tests the basic LinkedList operations."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Tests that the LinkedList handles heterogeneous data, including Nodes and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 693 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
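# Example of the flag-unpacking helper above:
#   parse_unknown_args(["--name", "squad", "--cache_dir", "/tmp/datasets"])
#   -> {"name": "squad", "cache_dir": "/tmp/datasets"}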
| 720 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
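# Assumed invocation: run this file under a distributed launcher so that
# PartialState sees more than one process, e.g.
#   accelerate launch --num_processes 2 test_ops.py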
| 387 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
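# Minimal usage sketch (the checkpoint id is illustrative):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")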
| 83 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
def UpperCamelCase_ ( snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = f'''--{field.name}'''
UpperCAmelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , snake_case__ ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
UpperCAmelCase = kwargs.pop("""aliases""" , [] )
if isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase = [aliases]
UpperCAmelCase = getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(snake_case__ , """UnionType""" ) and isinstance(snake_case__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(snake_case__ ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
f''' Problem encountered in field \'{field.name}\'.''' )
if type(snake_case__ ) not in field.type.__args__:
# filter `str` in Union
UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCAmelCase = getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCAmelCase = (
field.type.__args__[0] if isinstance(snake_case__ , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCAmelCase = getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCAmelCase = {}
if origin_type is Literal or (isinstance(field.type , snake_case__ ) and issubclass(field.type , snake_case__ )):
if origin_type is Literal:
UpperCAmelCase = field.type.__args__
else:
UpperCAmelCase = [x.value for x in field.type]
UpperCAmelCase = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
else:
UpperCAmelCase = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCAmelCase = copy(snake_case__ )
# Hack because type=bool in argparse does not behave as we want.
UpperCAmelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCAmelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCAmelCase = """?"""
# This is the value that will get picked if we do --field_name (without value)
UpperCAmelCase = True
elif isclass(snake_case__ ) and issubclass(snake_case__ , snake_case__ ):
UpperCAmelCase = field.type.__args__[0]
UpperCAmelCase = """+"""
if field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCAmelCase = True
else:
UpperCAmelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
else:
UpperCAmelCase = True
parser.add_argument(snake_case__ , *snake_case__ , **snake_case__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCAmelCase = False
parser.add_argument(f'''--no_{field.name}''' , action="""store_false""" , dest=field.name , **snake_case__ )
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
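# Minimal usage sketch: declare a dataclass of options and parse a fixed
# argument list into it (the option names are illustrative).
def _example_parse():
    @dataclasses.dataclass
    class ExampleOptions:
        learning_rate: float = 3e-4
        fp16: bool = False

    parser = HfArgumentParser(ExampleOptions)
    return parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16", "true"])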
| 673 | 0 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
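# Sanity check: the exact integral of x**2 over [0, 1] is 1/3 ≈ 0.333, so the
# printed trapezoidal approximation should land close to that (about 0.335 at
# steps = 10.0 with this node spacing).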
| 700 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __SCREAMING_SNAKE_CASE ( self ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0 )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids )
__a = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
__a = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
__a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , UpperCAmelCase )
__a = 1_0
__a = self.tokenizer(UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = tempfile.mkdtemp()
__a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase )
__a = MBartTokenizer.from_pretrained(UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase )
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , return_tensors='pt' )
__a = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors='pt',
        )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt'
        )
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR'
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                'input_ids': [[62, 3034, 2, 250_004]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250_001,
            },
        )
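# The batch tests above call `shift_tokens_right` from the MBart modeling code.
# A minimal sketch of its behaviour, assuming the upstream transformers
# implementation (the last non-pad token, i.e. the language id, is wrapped
# around to position 0 so every decoder input starts with the language code):
#
#     def shift_tokens_right(input_ids, pad_token_id):
#         prev_output_tokens = input_ids.clone()
#         prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)  # labels may use -100
#         index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
#         decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
#         prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
#         prev_output_tokens[:, 0] = decoder_start_tokens
#         return prev_output_tokens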
| 246 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and brightly lit night, with many bright buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
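# How the script above is typically run (file name illustrative; --dpm and
# --steps are the only flags defined by the parser):
#
#   python inference_bf16.py --dpm --steps 20
#
# A self-contained sanity check of the ipex.optimize() pattern on a toy module
# (assumes intel_extension_for_pytorch is installed):
#
#     toy = torch.nn.Linear(8, 8)
#     toy = ipex.optimize(toy.eval(), dtype=torch.bfloat16, inplace=True)
#     with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#         print(toy(torch.randn(2, 8)).shape)  # torch.Size([2, 8])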
| 275 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=True), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=True),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embedded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)

            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
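# Side note on the "\u0120" marker seen throughout this file: GPT-2-style
# byte-level BPE remaps the space byte 0x20 by adding 256, which yields
# U+0120 ("Ġ") as the prefix of word-initial tokens. A quick sanity check:
assert "\u0120" == chr(ord(" ") + 256)  # 0x20 + 0x100 == 0x120 -> "Ġ"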
| 576 | 0 |
def hamming_distance(string_a: str, string_b: str) -> int:
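    """Return the Hamming distance between two equal-length strings.

    Doctest examples (values checked by hand) so that ``doctest.testmod()``
    below has something to verify:

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """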
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')

    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
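# A minimal sketch of the flatten -> pad -> un-flatten trick used by the
# collator above, on plain tensors (shapes are illustrative):
#
#     flat = torch.zeros(batch_size * num_choices, seq_len)   # what tokenizer.pad returns
#     unflat = flat.view(batch_size, num_choices, -1)         # one row per answer choice
#
# e.g. with batch_size=2, num_choices=4, seq_len=5, `unflat.shape` is (2, 4, 5).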
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
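# A typical invocation (values are illustrative; every flag maps to a dataclass
# field above or to a standard TrainingArguments option):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output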
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
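# With the lazy structure above, heavy submodules are only imported when one of
# their attributes is first accessed. A hedged usage sketch (class names come
# from _import_structure; the modeling classes additionally require torch):
#
#     from transformers import XCLIPConfig, XCLIPModel
#
#     config = XCLIPConfig()      # resolves configuration_x_clip on first access
#     model = XCLIPModel(config)  # resolves modeling_x_clip on first access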
| 477 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    '''score''': ANY(float),
                    '''label''': ANY(str),
                    '''box''': {'''xmin''': ANY(int), '''ymin''': ANY(int), '''xmax''': ANY(int), '''ymax''': ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')

        batch = [
            Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            # RGBA
            dataset[0]['''file'''],
            # LA
            dataset[1]['''file'''],
            # L
            dataset[2]['''file'''],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        '''score''': ANY(float),
                        '''label''': ANY(str),
                        '''box''': {'''xmin''': ANY(int), '''ymin''': ANY(int), '''xmax''': ANY(int), '''ymax''': ANY(int)},
                    },
                )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        model_id = '''hf-internal-testing/tiny-detr-mobilenetsv3'''

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = '''facebook/detr-resnet-50'''

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''', model=model_id)

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''', model=model_id)

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9993

        object_detector = pipeline('''object-detection''', model=model_id, threshold=threshold)
        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png'''
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
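# For reference, the high-level API exercised by these tests reduces to the
# following sketch (model id as above; exact scores and boxes will vary):
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     predictions = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]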
| 521 | 0 |
"""
A simple launcher script for TPU training.

Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py
"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
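# Example invocation (the launched script must expose an `_mp_fn(index)` entry
# point, like run_swag.py above; script names here are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_swag.py --model_name_or_path bert-base-uncased --do_train ...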
| 713 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')

mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''

if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ : int = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
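# For context, the source/target SentencePiece pair tested above is what powers
# translation with Marian checkpoints. A hedged usage sketch:
#
#     from transformers import MarianMTModel, MarianTokenizer
#
#     tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tokenizer(["I am a small frog"], return_tensors="pt")
#     print(tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True))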
| 631 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
"""references""": {
"""id""": datasets.Value("""string"""),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string"""),
"""answer_start""": datasets.Value("""int32"""),
}),
},
}) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        # Map each question id to its predicted answer text.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild the nested SQuAD dataset structure expected by the official scoring script.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
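
# Example usage of the wrapper above (mirrors the docstring; note that
# `load_metric` is deprecated in recent `datasets` releases in favor of the
# separate `evaluate` package):
#
#   predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#   references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#   squad_metric = datasets.load_metric("squad")
#   print(squad_metric.compute(predictions=predictions, references=references))
#   # {'exact_match': 100.0, 'f1': 100.0}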
| 88 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
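
# A minimal sketch of composing the two sub-configs above into a full
# Pix2Struct config (the tiny layer counts are illustrative, not pretrained
# values):
#
#   text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
#   vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["text_config"]["num_layers"]  # 2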
| 612 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    # Default masks: attend everywhere except pad tokens; keep all attention heads.
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
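
# Illustrative usage of the helper above with a tiny config (a sketch, not
# part of the original tests; values mirror the model tester defaults below):
#
#   config = MaMaaaConfig(vocab_size=99, d_model=16, encoder_layers=2, decoder_layers=2,
#                         encoder_attention_heads=4, decoder_attention_heads=4)
#   input_ids = torch.tensor([[5, 6, 7, 2]])
#   decoder_input_ids = torch.tensor([[2, 5, 6, 7]])
#   inputs = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
#   sorted(inputs)  # the seven keys listed in the return statement above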
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,encoder_layerdrop=self.encoder_layerdrop,decoder_layerdrop=self.decoder_layerdrop,max_position_embeddings=self.max_position_embeddings,eos_token_id=self.eos_token_id,bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,)
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_case_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 714 |
def kth_permutation(k: int, n: int) -> list:
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
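
# Illustrative check (not in the original file): the 10th permutation of
# range(4) in lexicographic order, counting from 0.
assert kth_permutation(10, 4) == [1, 3, 0, 2]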
if __name__ == "__main__":
import doctest
doctest.testmod()
| 212 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 458 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
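
# Illustration of what the lazy module above buys you (a sketch; assumes a
# standard transformers install): importing the package is cheap, and the
# torch-heavy modeling file only executes on first attribute access.
#
#   import importlib
#   mod = importlib.import_module("transformers.models.gpt_bigcode")
#   config_cls = mod.GPTBigCodeConfig  # this access triggers the real import
#   print(config_cls.model_type)       # "gpt_bigcode"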
| 528 |
'''simple docstring'''
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Return the 1-based (row, column) coordinates of a letter in the square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write the row indices on the first line and the column indices on the second.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Read the two lines as one sequence, then turn consecutive pairs back into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Flatten each ciphertext letter back into its coordinate pair.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Split the sequence back into the row line and the column line.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
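
# Round-trip sketch (illustrative, not part of the original file): encode
# removes spaces and folds "j" into "i", so decode(encode(x)) returns x with
# spaces removed.
#
#   cipher = BifidCipher()
#   ciphertext = cipher.encode("test message")
#   cipher.decode(ciphertext)  # "testmessage"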
| 528 | 1 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
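
# Illustrative sifting step without qiskit (a sketch, not part of the original
# file): on an ideal noiseless channel, whenever Alice and Bob happen to pick
# the same basis, Bob's measured bit equals Alice's prepared state bit, so
# both sides keep exactly those positions.
#
#   rng = np.random.default_rng(0)
#   alice_basis = rng.integers(2, size=16)
#   bob_basis = rng.integers(2, size=16)
#   alice_state = rng.integers(2, size=16)
#   sifted = [int(s) for s, a, b in zip(alice_state, alice_basis, bob_basis) if a == b]
#   # On average about half of the 16 positions survive sifting.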
| 61 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=3_0_0, encoder_layers=6, encoder_ffn_dim=2_0_4_8, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2_0_4_8, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_5_6, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 1_2
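
# A minimal sketch of using the config above (values are illustrative):
#
#   config = ConditionalDetrConfig(num_queries=100)
#   config.hidden_size           # 256, aliased to d_model via attribute_map
#   config.num_attention_heads   # 8, aliased to encoder_attention_heads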
| 474 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_5_0_0_0_0_0) -> int:
    # Count, for every perimeter up to `limit`, how many right triangles with
    # integral sides realize it; triples come from Euclid's formula.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
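
# Sanity sketch of Euclid's formula used above (illustrative, not part of the
# original file): for coprime m > n of opposite parity, (m^2 - n^2, 2mn,
# m^2 + n^2) is a primitive right triangle with perimeter 2 * m * (m + n).
def _euclid_triple(m: int, n: int) -> tuple:
    return (m * m - n * n, 2 * m * n, m * m + n * n)


_a, _b, _c = _euclid_triple(2, 1)  # the (3, 4, 5) triangle
assert _a * _a + _b * _b == _c * _c
assert _a + _b + _c == 2 * 2 * (2 + 1)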
if __name__ == "__main__":
    print(f"{solution() = }")
| 19 |
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Put each sentence of the input on its own line (rougeLsum expects newline-separated sentences)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 19 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = F"{src_lang}-{tgt_lang}"
UpperCAmelCase_ = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    readme = UpperCAmelCase_  # the README body assembled by the f-string above
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(F"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 162 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2_048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 162 | 1 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    # Base case: every item has been considered.
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it still fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
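
# Quick check with a small instance (weights, values, n, capacity, start
# index): the best choice is items 0 and 2 (weights 1 + 4 = 5, values 5 + 8 = 13).
assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13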
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
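# Editor's sketch of the split performed by read_in_q_k_v above: timm fuses
# query/key/value into one (3 * hidden, hidden) matrix, and the three slices
# recover them in q, k, v order. The `hidden` value here is arbitrary.
def _demo_qkv_split(hidden=4):
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)
    return q, k, v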
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the COCO image of two cats commonly used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
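    # Editor's note: an example invocation (hypothetical script name and output path;
    # requires `timm` plus network access for the checkpoint and the ImageNet label file):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224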
| 529 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
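# Editor's sketch (not part of the module): byte-level BPE round-trips text exactly.
# Loading the 20B tokenizer requires network access; "Hello world" is an arbitrary probe.
def _demo_gpt_neox_tokenizer():
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    ids = tokenizer("Hello world")["input_ids"]
    assert tokenizer.decode(ids) == "Hello world"
    return ids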
| 307 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM-RoBERTa model."""
    model_type = "xlm-roberta"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
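# Editor's sketch (not part of the module): instantiate the config with its defaults
# and inspect the dynamic ONNX axes defined above. The `task="default"` argument is an
# assumption based on the generic OnnxConfig constructor.
def _demo_xlm_roberta_config():
    config = XLMRobertaConfig()
    assert config.vocab_size == 30_522 and config.hidden_size == 768
    onnx_config = XLMRobertaOnnxConfig(config, task="default")
    return dict(onnx_config.inputs)  # e.g. {"input_ids": {0: "batch", 1: "sequence"}, ...}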
| 715 | '''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np",
        )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True,
        )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))
        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np",
        )
        input_5 = input_5[input_name]
        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]
        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_10 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True,
        )
        input_10 = input_10[input_name]
        input_11 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of,
        )
        input_11 = input_11[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_10[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_10))
        self.assertFalse(_inputs_have_equal_length(input_11))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)
    def test_padding_from_array(self):
        self._check_padding(numpify=True)
    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)
    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
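# Editor's sketch (independent of the mixin above) of the longest-padding behaviour
# the tests assert: ragged inputs are padded to a common length with `padding_value`
# and an attention mask marks the real frames.
def _demo_longest_padding(padding_value=0.0):
    raw = [np.ones(3), np.ones(5)]  # two sequences of different lengths
    max_len = max(len(x) for x in raw)
    padded = np.stack([np.pad(x, (0, max_len - len(x)), constant_values=padding_value) for x in raw])
    attention_mask = (np.arange(max_len)[None, :] < np.array([len(x) for x in raw])[:, None]).astype(np.int32)
    assert padded.shape == (2, 5) and attention_mask.sum(-1).tolist() == [3, 5]
    return padded, attention_mask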
| 438 | 0 |
from typing import Any
def viterbi(observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        error_message = f"{var_name} must be a list"
        raise ValueError(error_message)
    else:
        for x in _object:
            if not isinstance(x, str):
                error_message = f"{var_name} must be a list of strings"
                raise ValueError(error_message)
def _validate_dicts(initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        error_message = f"{var_name} must be a dict"
        raise ValueError(error_message)
    if not all(isinstance(x, str) for x in _object):
        error_message = f"{var_name} all keys must be strings"
        raise ValueError(error_message)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        error_message = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(error_message)
if __name__ == "__main__":
    from doctest import testmod
    testmod()
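# Editor's sketch: the classic "Healthy/Fever" HMM often used to illustrate Viterbi.
# For the observations below the most likely hidden path is Healthy, Healthy, Fever.
def _demo_viterbi():
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    result = viterbi(observations, states, start_p, trans_p, emit_p)
    assert result == ["Healthy", "Healthy", "Fever"]
    return result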
| 63 |
"""simple docstring"""
def bfs(graph, source, sink, parent):
    """Breadth-first search; returns True if sink is reachable and fills parent[]."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp style Ford-Fulkerson; mutates graph into its residual network."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
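# Editor's sketch: a second, hand-checkable network. The only route is
# 0 -> 1 -> 2 -> 3 with bottleneck capacity 3, so the maximum flow is 3.
small_graph = [
    [0, 5, 0, 0],
    [0, 0, 3, 0],
    [0, 0, 0, 7],
    [0, 0, 0, 0],
]
assert ford_fulkerson(small_graph, 0, 3) == 3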
| 636 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/norm/activation layers."""
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) used in PSPNet."""
    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing head (UPerHead), based on FPN and PPM."""
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1,
        )
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head (FCNHead)."""
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: int = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
        )
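# Editor's sketch (not part of the module): end-to-end semantic segmentation with a
# pretrained UperNet checkpoint; requires network access to download the weights.
def _demo_upernet_inference(image):
    from transformers import AutoImageProcessor
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # logits have shape (batch, num_labels, height, width); argmax gives per-pixel classes
    return outputs.logits.argmax(dim=1)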
| 126 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # The following common-test hooks do not apply to a raw-bytes tokenizer, so they
    # are disabled here (method names chosen to match the shared test suite).
    def test_pretrained_model_lists(self):
        pass
    def test_get_vocab(self):
        pass
    def test_pretokenized_inputs(self):
        pass
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
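# Editor's sketch (not part of the test class): Perceiver tokenizes raw UTF-8 bytes,
# shifting each byte by the 6 special-token ids, which is exactly the pattern visible
# in the expected ids of test_multibytes_char above ([CLS]=4, [SEP]=5).
def _demo_perceiver_byte_ids():
    tokenizer = PerceiverTokenizer()
    ids = tokenizer("hi")["input_ids"]
    assert ids == [4] + [b + 6 for b in "hi".encode("utf-8")] + [5]
    return ids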
| 126 | 1 |
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray = None) -> np.ndarray:
    """Compute the Schur complement C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
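# Editor's worked example of the identity the tests below rely on: for the block
# matrix M = [[A, B], [B.T, C]], det(M) = det(A) * det(schur_complement(A, B, C)).
def _demo_schur_determinant():
    a = np.array([[2.0, 0.0], [0.0, 2.0]])
    b = np.array([[1.0], [0.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)  # 3 - 1/2 = 2.5
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
    return s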
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 285 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''wav2vec2'''
    def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self) -> int:
        # relies on `functools` and `operator` being imported at the top of the module
        return functools.reduce(operator.mul, self.conv_stride, 1)
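# A minimal standalone sketch (not part of the original file) showing what the
# conv-stride product exposed by `inputs_to_logits_ratio` means: the default
# strides (5, 2, 2, 2, 2, 2, 2) from the signature above multiply to a hop of
# 320 samples per output frame, i.e. 20 ms / 50 frames per second at 16 kHz.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)  # samples of raw audio per logit frame
assert ratio == 320
print(16_000 // ratio, "logit frames per second of 16 kHz audio")  # 50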
| 285 | 1 |
"""simple docstring"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Returns prime numbers below max_number

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """

    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """
    Returns the number of hybrid-integers p^q * q^p (p and q distinct primes)
    less than or equal to base^degree.
    """

    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
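# Why the two-pointer check works (note added for clarity): p^q * q^p <= base^degree
# exactly when q*log2(p) + p*log2(q) <= degree*log2(base) = upper_bound. For a fixed
# smallest prime p = prime_numbers[left], every prime q with left < q-index <= right
# satisfies the bound, which is where the `right - left` increment comes from.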
if __name__ == "__main__":
print(F"""{solution() = }""")
| 700 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
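# A minimal usage sketch (added for illustration; it assumes a `transformers`
# install that exposes DPTConfig): round-tripping a hybrid config through
# `to_dict()` shows the nested BiT backbone config serialized with the ViT fields.
#
#   from transformers import DPTConfig
#
#   config = DPTConfig(is_hybrid=True)               # default BiT backbone is created
#   as_dict = config.to_dict()
#   print(as_dict["model_type"])                     # "dpt"
#   print(as_dict["backbone_config"]["layer_type"])  # "bottleneck"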
| 558 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 596 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
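# A short end-to-end illustration (not part of the test file); it assumes
# `transformers` and `beautifulsoup4` are installed:
#
#   from transformers import MarkupLMFeatureExtractor
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
#   print(encoding.nodes)   # [['Hello']]
#   print(encoding.xpaths)  # [['/html/body/h1']]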
| 598 | 0 |
'''simple docstring'''
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
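# Worked examples (added for illustration), composing the `from_` factor into the
# base unit (cubic metres) with the `to` factor out of it:
#   volume_conversion(4, "cubicmeter", "litre")  -> 4 * 1 * 1000        = 4000.0
#   volume_conversion(1, "litre", "gallon")      -> 1 * 0.001 * 264.172 = 0.264172
#   volume_conversion(3, "kilolitre", "cup")     -> 3 * 1 * 4226.75     = 12680.25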
if __name__ == "__main__":
import doctest
doctest.testmod()
| 564 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 564 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 112 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 468 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
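# A short demo (added for illustration), kept under a main guard so importing the
# module stays side-effect free:
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.9, 1.1]])
    print(similarity_search(dataset, value_array))  # [[[1.0, 1.0], 0.1414...]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0 for parallel vectors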
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1_581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1_517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1_570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1_584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1_793
    _globals["_SELFTESTDATA"]._serialized_start = 1_795
    _globals["_SELFTESTDATA"]._serialized_end = 1_916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1_864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1_905
    _globals["_MODELPROTO"]._serialized_start = 1_919
    _globals["_MODELPROTO"]._serialized_end = 2_429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2_208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2_418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2_323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2_407
# @@protoc_insertion_point(module_scope)
| 3 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__( self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
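# A minimal shape-flow sketch (added for illustration; the constructor arguments
# below are example values, not ones prescribed by this file):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=2, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 16))
#   mask = torch.ones(1, 16, dtype=torch.long)
#   out, _ = encoder(tokens, mask)   # out.shape == (1, 16, 768)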
| 179 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
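# Example invocation (added for illustration; the checkpoint and Common Voice
# config below are typical choices for this script, not the only valid ones):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish-demo \
#       --num_train_epochs 5 --per_device_train_batch_size 16 --fp16 \
#       --do_train --do_eval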
| 173 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-processes a flattened flax key tuple and its tensor into PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 707 |
def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
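# Worked examples (added for illustration; values rounded):
#   dodecahedron_surface_area(5) = 3 * sqrt(25 + 10*sqrt(5)) * 25  ≈ 516.1432
#   dodecahedron_volume(5)       = (15 + 7*sqrt(5)) / 4 * 125      ≈ 957.8899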
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Calculates the expected number of distinct colours among num_picks drawn balls."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
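# Why this works (note added for clarity): by linearity of expectation, the expected
# number of distinct colours equals NUM_COLOURS times the probability that one given
# colour appears at least once among the picks, i.e. 7 * (1 - C(60, 20) / C(70, 20)).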
if __name__ == "__main__":
print(solution(20))
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 25 | 0 |
"""simple docstring"""
def sum_of_digits( n: int ) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n: int ) -> int:
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact( n: int ) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark( ) -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' , setup='import __main__' )
        print(F'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 22 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory( args : Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand ( parser : ArgumentParser ) -> None:
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' ,type=str ,help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' ,type=str ,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self ,testing : bool ,testing_file : str ,path=None ,*args ) -> None:
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ) -> None:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
else:
            with open(self._testing_file ,'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=True ,extra_context=testing_configuration ,)
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir ,exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(path : str ):
            with open(path ,'r' ) as f:
                lines = f.readlines()
            with open(path ,'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file : str ,line_to_copy_below : str ,lines_to_copy : List[str] ):
            # Create temp file
            fd, abs_path = mkstemp()
            line_found = False
            with fdopen(fd ,'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file ,abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path ,original_file )
        def skip_units(line : str ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile : str ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in ,line_to_copy_below ,lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory ) | 22 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowerCamelCase__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=None , a=None , a=None , a = None , a=None , a=False , **a , ) -> int:
'''simple docstring'''
_UpperCamelCase = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCamelCase = legacy_behaviour
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , tokenizer_file=a , src_lang=a , tgt_lang=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=a , **a , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
_UpperCamelCase = len(self.sp_model )
_UpperCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a )
}
_UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCamelCase = src_lang if src_lang is not None else """eng_Latn"""
_UpperCamelCase = self.lang_code_to_id[self._src_lang]
_UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> BatchEncoding:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def A_ ( self , a , a = "eng_Latn" , a = None , a = "fra_Latn" , **a , ) -> BatchEncoding:
'''simple docstring'''
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(a , a , **a )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self ) -> Tuple:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
_UpperCamelCase = [self.eos_token_id]
| 612 |
def check_bouncy(n: int ) -> bool:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(n )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
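# For reference: 538 is bouncy (5 > 3 then 3 < 8, so its digits are neither
# non-decreasing nor non-increasing), while 1349 (non-decreasing) and 96420
# (non-increasing) are not.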
def solution(percent: float = 9_9 ) -> int:
    """simple docstring"""
    if not 0 < percent < 1_0_0:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 612 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 1_00
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_controlnet( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
        hint_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint_image ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=1_00 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image , expected_image )
| 372 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : int = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
        """XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XGLMForCausalLM""",
        """XGLMModel""",
        """XGLMPreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
        """FlaxXGLMForCausalLM""",
        """FlaxXGLMModel""",
        """FlaxXGLMPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
        """TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXGLMForCausalLM""",
        """TFXGLMModel""",
        """TFXGLMPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 372 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
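

# Shape sketch (hedged, for orientation only): given (batch, seq) id tensors the
# helper above returns boolean padding masks of the same (batch, seq) shape plus
# all-ones head masks of shape (num_layers, num_heads), i.e. no attention head is
# pruned by default.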
class MaMaaaModelTester :
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self ):
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )["last_hidden_state"]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class MaMaaaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
    def test_decoder_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_encoder_decoder_model_standalone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    def test_inputs_embeds( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids" , encoder_input_ids )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , None )
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids )
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids )
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids )
            with torch.no_grad():
                model(**inputs )[0]
    def test_generate_fp16( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor( data ):
    """simple docstring"""
    return torch.tensor(data , dtype=torch.long , device=torch_device )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_tokenizer( self ):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    def test_inference_no_head( self ):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        # change to intended input
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
        src_fr = [
"L\'affaire NSA souligne l\'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors="pt" )
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device ) , attention_mask=dct["attention_mask"].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
        expected_en = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
assert generated == expected_en
| 424 |
'''simple docstring'''
def binary_exponentiation( a : int , n : int , mod : int ) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
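# Why the two checks agree: by Fermat's little theorem b^(p - 1) = 1 (mod p) for a
# prime p not dividing b, so b^(p - 2) is the modular inverse of b, and dividing by
# b mod p is the same as multiplying by b^(p - 2) mod p.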
| 399 | 0 |
def molarity_to_normality( nfactor , moles , volume ) -> float:
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure( volume , moles , temperature ) -> float:
    return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def moles_to_volume( pressure , moles , temperature ) -> float:
    return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature( pressure , volume , moles ) -> float:
    return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
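    # Quick sanity value (assuming the ideal-gas units implied above: atm, L, K):
    # 1 mol at 300 K in a 10 L vessel gives round(1 * 0.0821 * 300 / 10) = 2.
    print(moles_to_pressure(volume=10, moles=1, temperature=300))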
| 719 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}
    def __init__( self , backbone_config = None , feature_size = 256 , mask_feature_size = 256 , hidden_dim = 256 , encoder_feedforward_dim = 1024 , activation_function = "relu" , encoder_layers = 6 , decoder_layers = 10 , num_attention_heads = 8 , dropout = 0.0 , dim_feedforward = 2048 , pre_norm = False , enforce_input_projection = False , common_stride = 4 , ignore_value = 255 , num_queries = 100 , no_object_weight = 0.1 , class_weight = 2.0 , mask_weight = 5.0 , dice_weight = 5.0 , train_num_points = 12544 , oversample_ratio = 3.0 , importance_sample_ratio = 0.7_5 , init_std = 0.0_2 , init_xavier_std = 1.0 , use_auxiliary_loss = True , feature_strides = [4, 8, 16, 32] , output_auxiliary_logits = None , **kwargs , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
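

# Hedged usage sketch: instantiating with no arguments falls back to the default
# Swin backbone that __init__ builds above.
#   config = Mask2FormerConfig()
#   assert config.backbone_config.model_type == "swin"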
| 77 | 0 |
from __future__ import annotations
from typing import Any
class Matrix :
    '''simple docstring'''

    def __init__( self , row : int , column : int , default_value : float = 0 ):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        '''simple docstring'''
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self : List[str] ):
'''simple docstring'''
return str(self )
    def validate_indicies( self , loc : tuple[int, int] ):
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc : tuple[int, int] ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc : tuple[int, int] , value : float ):
        '''simple docstring'''
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another : Matrix ):
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ):
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another : Matrix ):
        '''simple docstring'''
        return self + (-another)
    def __mul__( self , another : int | float | Matrix ):
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self ):
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u : Matrix , v : Matrix ):
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
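    # The method above applies the Sherman-Morrison identity with self = A^(-1):
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u).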
# Testing
if __name__ == "__main__":
    def test1( ) -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def test2( ) -> None:
        import doctest
        doctest.testmod()
    test2() | 352 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests ( unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests ( unittest.TestCase):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
            generator = generator.manual_seed(0 )
            new_image = pipe.dual_guided(
                prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        image = pipe.image_variation(init_image , generator=generator , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 352 | 1 |
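The save/load test above hinges on torch's seeding contract: reseeding a generator with the same value must reproduce the exact same random draws, so two pipeline runs can be compared tensor-for-tensor. The underlying property in isolation (plain torch, no diffusers involved):

import torch

generator = torch.manual_seed(0)
a = torch.randn(4, generator=generator)
generator = generator.manual_seed(0)  # same reseeding idiom as in the test
b = torch.randn(4, generator=generator)
assert torch.equal(a, b)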
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)")
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.")
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257, n_positions=1_024, n_embd=1_024, n_layer=24, n_head=16, n_inner=4_096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 480 |
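The trickiest step in the conversion above is the QKV row permutation, so a self-contained shape check helps. This sketch builds a fake fused QKV weight in the checkpoint-version >= 2.0 layout ([num_heads * num_splits * hidden_size, :]) and confirms fix_query_key_value_ordering only regroups rows from heads-major to splits-major (the sizes are made up for illustration, and the function above must be in scope):

import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32).reshape(num_heads * num_splits * head_dim, cols)
out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, head_dim)
# Overall shape is unchanged; block (h, s) of the input becomes block (s, h) of the output
assert out.shape == param.shape
assert torch.equal(out.reshape(num_splits, num_heads, head_dim, cols)[0, 1], param.reshape(num_heads, num_splits, head_dim, cols)[1, 0])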
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs, [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ], )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 480 | 1 |
'''simple docstring'''
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.

    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(roman_to_int(key) == value for key, value in tests.items())
    True
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.

    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(int_to_roman(value) == key for key, value in tests.items())
    True
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
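A quick way to gain extra confidence in the two converters above is a round-trip property check over the full Roman range; a minimal sketch, assuming roman_to_int and int_to_roman are in scope:

for n in range(1, 4000):
    assert roman_to_int(int_to_roman(n)) == n

assert int_to_roman(1994) == "MCMXCIV"
assert roman_to_int("MCMXCIV") == 1994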
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
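The _LazyModule indirection used above defers heavy imports (torch, TensorFlow) until an attribute is actually accessed. The same effect can be sketched with PEP 562's module-level __getattr__; this is a simplified illustration of the idea, not the actual _LazyModule implementation:

# my_package/__init__.py (illustrative)
import importlib

_import_structure = {"tokenization_deberta": ["DebertaTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule only when one of its symbols is first requested
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")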
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 672 | 1 |
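For intuition, the coefficient this metric wraps is just the normalized covariance, r = cov(x, y) / (sigma_x * sigma_y). A minimal NumPy sketch that reproduces scipy's value on the docstring example above (pearson_r is an illustrative name, not part of the metric):

import numpy as np

def pearson_r(x, y):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    xc, yc = x - x.mean(), y - y.mean()
    return float((xc * yc).sum() / np.sqrt((xc**2).sum() * (yc**2).sum()))

predictions = [10, 9, 2.5, 6, 4]
references = [1, 2, 3, 4, 5]
print(round(pearson_r(references, predictions), 2))  # -0.74, matching the example above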
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = 'yolos'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 3 |
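One consequence of this config worth spelling out: a YOLOS encoder sequence is the image patches plus (in the Transformers implementation) a ViT-style [CLS] token plus the learned detection tokens from num_detection_tokens. With the defaults above, the arithmetic is plain Python over the config fields:

image_height, image_width = 512, 864  # config.image_size
patch_size = 16                       # config.patch_size
num_patches = (image_height // patch_size) * (image_width // patch_size)  # 32 * 54 = 1728
seq_len = num_patches + 1 + 100       # + [CLS] + config.num_detection_tokens
print(seq_len)  # 1829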
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after shortest-edge resizing,
        # i.e. the shape the image processor should produce with do_resize=True.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors='pt')
        encoded_images = image_processing_2(image_inputs, return_tensors='pt')

        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1e-4))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39_769, 'annotations': target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = YolosImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 443 | 0 |
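The expected-size logic in get_expected_values mirrors DETR-style shortest-edge resizing: scale so the shorter side hits size["shortest_edge"] while preserving aspect ratio (ignoring the longest_edge cap, which does not bind in this example). A standalone sketch of that rule:

def shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale the shorter side to `shortest_edge`, keeping the aspect ratio
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 800))  # (800, 1066), the shape asserted in the slow tests above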
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    """
    Approximates the arc length of the curve fnc between x_start and x_end.

    >>> def f(x):
    ...    return x
    >>> f"{line_length(f, 0, 1, 10):.6f}"
    '1.414214'
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 617 |
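Because line_length only chains straight segments, it is exact on straight lines and converges from below on curves; a small sanity check, assuming line_length is in scope:

import math

# Exact on a straight line: y = 2x from 0 to 3 has length sqrt(3**2 + 6**2)
assert math.isclose(line_length(lambda x: 2 * x, 0, 3, steps=1), math.hypot(3, 6))

# Converges on a curve: a unit semicircle has arc length pi
semicircle = lambda x: math.sqrt(max(1 - x * x, 0.0))
print(line_length(semicircle, -1, 1, steps=100_000))  # ~3.14159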
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    >>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
    500
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 | 1 |
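The recurrence above maintains two running values: the best sum that includes the current element and the best that excludes it, so the whole scan is O(n) time and O(1) space. A short usage check, assuming the function above is in scope:

assert maximum_non_adjacent_sum([1, 2, 3]) == 4       # pick 1 and 3
assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13  # pick 3 and 10
assert maximum_non_adjacent_sum([-1, -2, -3]) == 0    # excluding everything is allowed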
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=3_2, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False, ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample)
| 681 |
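FlaxTimesteps above produces the standard sinusoidal timestep features that FlaxTimestepEmbedding's MLP then projects to time_embed_dim. A minimal NumPy sketch of that featurization (a generic sinusoidal embedding for illustration, not the exact diffusers implementation, whose half-dim split and scaling options differ slightly):

import numpy as np

def sinusoidal_timestep_embedding(timesteps: np.ndarray, dim: int, max_period: float = 10000.0) -> np.ndarray:
    half = dim // 2
    freqs = np.exp(-np.log(max_period) * np.arange(half) / half)   # (half,)
    args = timesteps[:, None].astype(np.float32) * freqs[None, :]  # (batch, half)
    return np.concatenate([np.sin(args), np.cos(args)], axis=-1)   # (batch, dim)

emb = sinusoidal_timestep_embedding(np.array([0, 10, 999]), dim=320)
print(emb.shape)  # (3, 320), then an MLP lifts this to block_out_channels[0] * 4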
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 665 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
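# Illustrative end-to-end sketch (added; not part of the original tests, and it
# assumes the checkpoint can be downloaded): one processor call handles both
# modalities at once.
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=floats_list((1, 1000)), return_tensors="np")
#   # `inputs` then holds tokenizer outputs (input_ids, attention_mask) alongside
#   # feature-extractor outputs (input_features).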
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
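# Note (added for clarity; not in the original file): assigning the _LazyModule
# into sys.modules makes every name declared in _import_structure importable
# while deferring the heavy torch/TF/Flax imports until first attribute access:
#   from transformers.models.distilbert import DistilBertConfig  # cheap, no torch import yet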
| 670 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12,
        encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096,
        decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0,
        attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False,
        pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
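# Illustrative sketch (added; not part of the original module): inspect the
# dynamic axes the ONNX export would use for a default Marian configuration.
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config)
#   print(onnx_config.inputs)   # OrderedDict mapping input names to {axis: symbolic dim}
#   print(onnx_config.outputs)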
| 24 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None,
        streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None,
        load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
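# Illustrative usage sketch (added; assumes an active SparkSession named `spark`
# and is not part of the original file):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()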
| 102 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('Length must be a positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('Length must be a positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
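    # Illustrative usage (added; not in the original file):
    print(f"Surface area, edge=2: {dodecahedron_surface_area(2):.4f}")  # ~82.5829
    print(f"Volume, edge=2: {dodecahedron_volume(2):.4f}")  # ~61.3050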
| 192 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 192 | 1 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 196 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively."""
    result = 1  # Holds the computed value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees for `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees for `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
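    # Worked check (added): for 3 nodes, catalan_number(3) = C(6, 3) // 4 = 5 distinct
    # binary search trees, and binary_tree_count(3) = 5 * 3! = 30 distinct binary trees.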
| 196 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
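# Example invocation (illustrative, added; the script name and paths are placeholders):
#   python xla_spawn.py --num_cores 8 examples/run_glue.py --model_name_or_path bert-base-cased ...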
if __name__ == "__main__":
    main()
| 493 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 493 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5], crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 664 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
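# Illustrative usage (added; assumes torch is installed, which this module does
# not import itself):
#   import torch
#   images = torch.rand(2, 3, 64, 64) * 2 - 1  # fake model output in [-1, 1]
#   pil_images = pt_to_pil(images)             # -> list of two 64x64 PIL images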
| 664 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
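# Illustrative usage sketch (added; assumes the tools runtime, torch, and a
# local image file — none of which this module checks for here):
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   print(captioner(Image.open("photo.jpg")))  # __call__ chains encode -> forward -> decode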
| 706 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place with the (deliberately inefficient) slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
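    # Illustrative usage (added): slowsort sorts the list in place.
    data = [5, 2, 4, 1, 3]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]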
| 47 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
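    # Example invocation (illustrative, added; paths and IDs are placeholders):
    #   python train_mlm_tpu.py --train_dataset gs://bucket/train --eval_dataset gs://bucket/eval \
    #       --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm_model --bfloat16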
| 584 |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Optional[Any] = 0
snake_case_ :Dict = 0
snake_case_ :Any = {}
def _a ( self , a ):
"""simple docstring"""
if vertex not in self.adjacency:
snake_case_ :str = {}
self.num_vertices += 1
def _a ( self , a , a , a ):
"""simple docstring"""
self.add_vertex(a )
self.add_vertex(a )
if head == tail:
return
snake_case_ :List[Any] = weight
snake_case_ :str = weight
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = self.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
edges.remove((tail, head, weight) )
for i in range(len(a ) ):
snake_case_ :Dict = list(edges[i] )
edges.sort(key=lambda a : e[2] )
for i in range(len(a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case_ :Tuple = edges[i][2] + 1
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
snake_case_ :Dict = weight
snake_case_ :int = weight
def __str__( self ):
"""simple docstring"""
snake_case_ :List[Any] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case_ :List[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def _a ( self ):
"""simple docstring"""
snake_case_ :int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _a ( self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def _a ( a=None , a=None ):
"""simple docstring"""
snake_case_ :Optional[Any] = Graph()
if vertices is None:
snake_case_ :int = []
if edges is None:
snake_case_ :Any = []
for vertex in vertices:
g.add_vertex(a )
for edge in edges:
g.add_edge(*a )
return g
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Dict = {}
snake_case_ :Optional[int] = {}
def __len__( self ):
"""simple docstring"""
return len(self.parent )
def _a ( self , a ):
"""simple docstring"""
if item in self.parent:
return self.find(a )
snake_case_ :Optional[Any] = item
snake_case_ :str = 0
return item
def _a ( self , a ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(a )
if item != self.parent[item]:
snake_case_ :Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :Any = self.find(a )
snake_case_ :str = self.find(a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case_ :Union[str, Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case_ :Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case_ :Union[str, Any] = roota
return roota
return None
@staticmethod
def _a ( a ):
"""simple docstring"""
snake_case_ :Any = graph.num_vertices
snake_case_ :Any = Graph.UnionFind()
snake_case_ :Optional[Any] = []
while num_components > 1:
snake_case_ :List[Any] = {}
for vertex in graph.get_vertices():
snake_case_ :str = -1
snake_case_ :Tuple = graph.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
snake_case_ :List[Any] = union_find.find(a )
snake_case_ :Union[str, Any] = union_find.find(a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case_ , snake_case_ , snake_case_ :List[Any] = cheap_edge[vertex]
if union_find.find(a ) != union_find.find(a ):
union_find.union(a , a )
mst_edges.append(cheap_edge[vertex] )
snake_case_ :List[Any] = num_components - 1
snake_case_ :Any = Graph.build(edges=a )
return mst
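if __name__ == "__main__":
    # Illustrative usage (added; not part of the original module): MST of a
    # small weighted triangle keeps the two cheapest edges.
    g = Graph.build(vertices=["a", "b", "c"], edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]])
    print(Graph.boruvka_mst(g))  # prints the MST edges with weights 1 and 2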
| 584 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + sum_i theta_(i+1) * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
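# The update applied by run_gradient_descent below is standard batch gradient
# descent for a linear hypothesis (added note):
#   theta_j := theta_j - LEARNING_RATE * (1/m) * sum_i (h(x_i) - y_i) * x_i[j]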
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
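# Example invocation (added for illustration; the file name and checkpoint paths below
# are assumptions, not taken from the original source):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wavlm_base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted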
| 672 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale pixel values to the range [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
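# A minimal usage sketch (added for illustration; not part of the original module).
# The 16-color palette and the input image are random toy data -- real ImageGPT
# checkpoints ship their own trained cluster centers.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    toy_clusters = rng.uniform(-1, 1, size=(16, 3))  # 16 RGB cluster centers in [-1, 1]
    processor = ImageGPTImageProcessor(clusters=toy_clusters, size={"height": 32, "width": 32})
    dummy_image = rng.integers(0, 255, size=(64, 64, 3)).astype(np.uint8)
    encoding = processor(images=dummy_image, return_tensors="np")
    print(encoding["input_ids"].shape)  # (1, 32 * 32) -- one palette index per pixel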
| 608 | 0 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Check whether a queen can be placed at board[row][column] without being attacked.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Place queens row by row, backtracking when no safe column exists.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """
    Print the board with 'Q' for queens and '.' for empty squares.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 714 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """
    Return the squared second norm of vector = vector . vector
    """
    return np.dot(vector, vector)


class SVC:
    """
    Support Vector Classifier trained by solving the Wolfe dual with scipy.
    """

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel was used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF (radial basis function) kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """
        Get the expected class of an observation.
        """
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
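# A tiny demonstration (added; not in the original file): two linearly separable
# points with +1/-1 labels. The values are purely illustrative, and convergence of
# scipy's minimize on such toy data is assumed.
if __name__ == "__main__":
    xs = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
    ys = np.array([1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.array([2.0, 2.0])))  # expected: 1
    print(svc.predict(np.array([-2.0, -2.0])))  # expected: -1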
| 147 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedfileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """
    Preprocess `dataset_path` by removing the remote filesystem prefix (e.g. ``s3://``).
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """
    Check whether the filesystem uses a remote protocol.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """
    Rename a file or directory on the given filesystem.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear fsspec's references to its event loop and thread so that async
    filesystems can be safely re-used after a fork.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
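# Illustration of the URI helper (added; not part of the original module):
#   extract_path_from_uri("s3://my-bucket/datasets/train")  -> "my-bucket/datasets/train"
#   extract_path_from_uri("/local/path/train")              -> "/local/path/train"  (unchanged)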
| 20 |
'''simple docstring'''
from functools import reduce
__lowerCamelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """
    Return the greatest product of thirteen adjacent digits in the 1000-digit number n.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 467 | 0 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """
    Helper function to solve the knight tour problem by backtracking.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem on a board of size n.
    Raises ValueError if the tour cannot be performed for the given size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
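# A quick demonstration (added; not in the original file). A 5x5 board admits an
# open knight's tour, which plain backtracking is expected to find quickly here,
# though runtime grows rapidly with board size.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)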
| 239 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 239 | 1 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """
    Return the sum of the proper divisors of n.
    """
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """
    Return the sum of all amicable numbers below `limit`.
    """
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 542 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """
    Return the number of times `term` appears in `document`.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """
    Return (number of documents in `corpus` containing `term`, total number of documents).
    Documents are separated by newlines.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """
    Return round(log10(n / df), 3), or the smoothed variant 1 + log10(n / (1 + df)).
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """
    Combine term frequency and inverse document frequency.
    """
    return round(tf * idf, 3)
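# A small worked example (added for illustration; not in the original file):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog barked\nthe cat ran"
    tf = term_frequency("cat", "the cat sat with another cat")  # -> 2
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(1.5), 3) = 0.176
    print(tf_idf(tf, idf))  # -> 0.352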
| 542 | 1 |
def hamming_distance(string1: str, string2: str) -> int:
    """
    Count the positions at which two equal-length strings differ.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """
    Return n choose k, the number of ways to choose k items from n.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 259 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
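# Usage sketch (added for illustration; downloading the tokenizer requires network access):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("Hello world")["input_ids"]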
| 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """
    Möbius function: 1 if n is square-free with an even number of prime factors,
    -1 if square-free with an odd number, and 0 otherwise.
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
        num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
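# Quick check (added for illustration): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples the waveform by
# 5 * 2**6 = 320 input samples per output frame.
if __name__ == "__main__":
    config = Wav2Vec2Config()
    print(config.inputs_to_logits_ratio)  # 320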
| 715 |
def solution(limit: int = 1_000_000) -> int:
    """
    Sum Euler's totient phi(n) for 2 <= n <= limit using a sieve: start with
    phi[i] = i - 1 (exact for primes) and, for each prime i, subtract
    phi[j] // i from every multiple j of i.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 373 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the parent index in the heap array."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the left-child index in the heap array."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the right-child index in the heap array."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """
    Minimum priority queue backed by an array heap, with a position map for
    O(log n) key updates.
    """

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """
    Undirected, weighted graph stored as an adjacency dict.
    """

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
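# A small demonstration (added; not in the original file): on a triangle graph the
# spanning tree produced here keeps the two cheap edges and drops the expensive one.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted()
    graph.add_edge(1, 2, 3)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)
    dist, parent = prims_algo(graph)
    print(parent)  # e.g. {1: None, 2: 1, 3: 2} -> edges (1, 2) and (2, 3)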
| 279 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True], cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1],
        initializer_range=0.02, layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 414 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """
    Given an 'isbn/0140328726'-style olid, return book data from Open Library as a dict.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """
    Given Open Library book data, return a summarized dict with human-readable keys.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('\n'.join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""") | 720 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
self.assertEqual(old_configuration.hidden_size , 768) | 182 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 600 |
| 600 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
    'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
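# Every entry above sets a common `PretrainedConfig` kwarg to a non-default value;
# `ConfigTestUtils.test_config_common_kwargs_is_complete` below uses this mapping to
# catch new common config attributes that are added without being covered here.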
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''')
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults)}.''')
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # Check that the fake HEAD request was indeed called
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 110 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
def check_models_equal(model_a, model_b):
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
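# `check_models_equal` is used by the subfolder tests below: a checkpoint reloaded via
# `from_pretrained` should match the original parameters to within 1e-4 per tensor.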
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
'''simple docstring'''
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 695 |
'''simple docstring'''
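# If p/q is the current fraction approximating sqrt(2), the next expansion of the
# continued fraction 1 + 1/(2 + 1/(2 + ...)) is (p + 2q)/(p + q): starting from 1/1
# this yields 3/2, 7/5, 17/12, 41/29, ...  `solution` counts how many of the first
# `n` expansions have a numerator with more digits than the denominator
# (Project Euler problem 57).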
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, processed one floating-point sample at a time."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the histories by one sample and store the newest values at index 0
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
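# Minimal usage sketch (illustrative coefficients, not part of the original module):
# a first-order low-pass section y[n] = 0.5 * y[n-1] + 0.5 * x[n].
if __name__ == "__main__":
    lowpass = IIRFilter(1)
    lowpass.set_coefficients([1.0, -0.5], [0.5, 0.0])
    # the impulse response decays geometrically: 0.5, 0.25, 0.125, 0.0625
    print([lowpass.process(sample) for sample in [1.0, 0.0, 0.0, 0.0]])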
| 211 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 540 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    # a list of rendered frames per input image, as PIL images or numpy arrays
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel,
                 image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler,
                 renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        # scale the initial noise by the scheduler's expected standard deviation
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        device = torch.device(f'''cuda:{gpu_id}''')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
                hasattr(module, """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="""pt""").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["""last_hidden_state"""]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None,
                 guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True):
"""simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}''')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
# prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                # classifier-free guidance: move the prediction away from the unconditional branch
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase__ )
_UpperCamelCase :Optional[Any] =[]
for i, latent in enumerate(lowerCAmelCase__ ):
print()
_UpperCamelCase :Any =self.renderer.decode(
latent[None, :] , lowerCAmelCase__ , size=lowerCAmelCase__ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowerCAmelCase__ )
_UpperCamelCase :Tuple =torch.stack(lowerCAmelCase__ )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_UpperCamelCase :List[Any] =images.cpu().numpy()
if output_type == "pil":
_UpperCamelCase :List[str] =[self.numpy_to_pil(lowerCAmelCase__ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase__ ) | 707 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
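# `_import_structure` declares each submodule's public objects; the `_LazyModule`
# installed into `sys.modules` at the bottom of this file resolves them on first
# attribute access, so importing the package does not pull in torch until a model
# class is actually used.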
_import_structure = {
    """configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_upernet"""] = [
        """UperNetForSemanticSegmentation""",
        """UperNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 512 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 650 |
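# Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1, i.e. each term is
# one more than the product of all previous terms.  The recursion below computes a(n)
# as (a(n-1) - 1) * a(n-1) + 1.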
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 31 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
                 num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
@slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        # with target sizes, the predicted class maps are resized to the requested resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # without target sizes, the native logits resolution is kept
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
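# --- illustrative usage sketch, not part of the test suite above ---
# A minimal end-to-end example of the inference flow these tests exercise:
# load a BEiT segmentation checkpoint, run a forward pass, and post-process
# to a per-pixel class map. The local image path is a hypothetical
# placeholder; the calls mirror the ones used in the tests.
def example_beit_segmentation(image_path="ade20k_sample.jpg"):
    import torch
    from PIL import Image
    from transformers import BeitForSemanticSegmentation, BeitImageProcessor

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640").to(device)
    processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)

    # PIL reports (width, height); post-processing expects (height, width)
    (segmentation,) = processor.post_process_semantic_segmentation(
        outputs=outputs, target_sizes=[image.size[::-1]]
    )
    return segmentation  # LongTensor of shape (height, width) with ADE20K class ids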
| 701 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''mobilenet_v1'''
def __init__( self : Tuple , _UpperCAmelCase : int=3 , _UpperCAmelCase : Union[str, Any]=224 , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Any=8 , _UpperCAmelCase : List[Any]="relu6" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[Any]=0.001 , **_UpperCAmelCase : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = depth_multiplier
UpperCAmelCase_ = min_depth
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = tf_padding
UpperCAmelCase_ = classifier_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1e-4
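# --- illustrative usage sketch, not part of the original module ---
# Shows how the config is typically consumed: build it with a reduced depth
# multiplier and hand it to the matching model class. The hyperparameter
# values below are illustrative, not defaults from any checkpoint.
if __name__ == "__main__":
    from transformers import MobileNetV1Model

    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    model = MobileNetV1Model(config)  # randomly initialized weights
    print(model.config.depth_multiplier)  # 0.75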
| 14 | 0 |
"""M-CTC-T model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 190 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
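# --- illustrative usage sketch, not part of the original shim ---
# The supported import path that the deprecation warning points to. The
# checkpoint name, prompt, and input images below are placeholders.
def example_inpainting(init_image, mask_image):
    from diffusers import StableDiffusionInpaintPipeline

    pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
    return pipe(prompt="a red park bench", image=init_image, mask_image=mask_image).images[0]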
| 190 | 1 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class _lowercase :
def __init__( self , UpperCAmelCase_ ) -> Tuple:
lowerCamelCase : int = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowerCamelCase : Any = len(UpperCAmelCase_ ) - 1
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase_ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase_ ) , 5 ) == 1
return output_values
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCamelCase : str = self.basis_function(UpperCAmelCase_ )
lowerCamelCase : Dict = 0.0
lowerCamelCase : Tuple = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _UpperCamelCase ( self , UpperCAmelCase_ = 0.01 ) -> Optional[int]:
from matplotlib import pyplot as plt # type: ignore
lowerCamelCase : list[float] = [] # x coordinates of points to plot
lowerCamelCase : list[float] = [] # y coordinates of points to plot
lowerCamelCase : int = 0.0
while t <= 1:
lowerCamelCase : Union[str, Any] = self.bezier_curve_function(UpperCAmelCase_ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowerCamelCase : Optional[Any] = [i[0] for i in self.list_of_points]
lowerCamelCase : str = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase_ , UpperCAmelCase_ , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
plt.scatter(UpperCAmelCase_ , UpperCAmelCase_ , color='red' , label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
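# --- worked example, not part of the original module ---
# The basis above is the Bernstein polynomial B_{i,n}(t) = comb(n, i) * t**i * (1 - t)**(n - i),
# and the curve point is P(t) = sum_i B_{i,n}(t) * P_i. For the degree-2 curve
# [(0, 0), (5, 5), (5, 0)] at t = 0.5 the weights are (0.25, 0.5, 0.25), so
# P(0.5) = 0.25*(0, 0) + 0.5*(5, 5) + 0.25*(5, 0) = (3.75, 2.5).
if __name__ == "__main__":
    quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
    assert quadratic.basis_function(0.5) == [0.25, 0.5, 0.25]
    assert quadratic.bezier_curve_function(0.5) == (3.75, 2.5)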
| 711 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : int = Mock()
lowerCamelCase : int = conn, Mock()
lowerCamelCase : Any = iter([1, None] )
lowerCamelCase : Dict = lambda a_ : next(a_ )
# ===== invoke =====
send_file(filename='mytext.txt', testing=a_ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
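# --- illustrative sketch, not part of the original test module ---
# A plausible shape for the `send_file` function exercised above, reconstructed
# from the test's assertions (bind/listen/accept once, a single recv, chunked
# reads until a falsy read, one send, then shutdown/close). The real
# implementation lives in file_transfer/send_file.py and may differ in detail.
def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    import socket

    port = 12312  # hypothetical port choice
    sock = socket.socket()
    sock.bind((socket.gethostname(), port))
    sock.listen(5)

    while True:
        conn, _addr = sock.accept()
        conn.recv(1024)  # wait for the client's request

        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:  # the mocked read returns 1 then None, so send runs once
                conn.send(data)
                data = in_file.read(1024)

        conn.close()
        if testing:  # let the test finish after a single transfer
            break

    sock.shutdown(1)
    sock.close()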
| 133 | 0 |