import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """
    Compute the circular convolution of two discrete signals by building a
    circulant matrix from the second signal.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # fill the smaller signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the circulant matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # round off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
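
# Quick check of the class above. Working the circulant-matrix product out by hand for the
# two default signals gives the circular convolution [10, 10, 6, 14]:
#
#   CircularConvolution().circular_convolution()  # -> [10, 10, 6, 14]
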
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
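
# A minimal sketch of the lazy-import pattern used above. This standalone LazyModule is a
# hypothetical simplification, not the real transformers._LazyModule: attribute access looks
# up which submodule defines the requested symbol and only then imports that submodule.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # only called for attributes not found the normal way, i.e. not yet imported
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f".{self._symbol_to_module[symbol]}", self.__name__)
        return getattr(module, symbol)
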
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
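
# Usage sketch for the config above (assumes the transformers package is installed):
from transformers import MvpConfig, MvpModel

config = MvpConfig()        # defaults mirror the RUCAIBox/mvp checkpoint
model = MvpModel(config)    # randomly initialized MVP model
print(config.hidden_size)   # resolved through attribute_map to d_model -> 1024
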
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns every way the string `target` can be constructed by
    concatenating words from `word_bank` (words may be reused).
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase: int ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase, _lowerCamelCase ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
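
# Expected output of the two calls above (h(n) = n * (2n - 1) for n = 0, 1, 2, ...):
#   [0, 1, 6, 15, 28]
#   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
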
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase ( unittest.TestCase ):
def UpperCAmelCase (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = logging.get_logger()
# the current default level is logging.WARNING
lowerCAmelCase = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = logging.get_verbosity()
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,'''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def UpperCAmelCase (self : Any ) -> str:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' ,SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = logging.log_levels[env_level_str]
lowerCAmelCase = logging.get_verbosity()
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" ,)
# restore to the original level
lowerCAmelCase = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def UpperCAmelCase (self : List[Any] ) -> Tuple:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase = logging.logging.getLogger()
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' ,cl.out )
# no need to restore as nothing was changed
def UpperCAmelCase (self : Dict ) -> Any:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,'''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
def __magic_name__ ( ) -> List[Any]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
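
# Quick sketch of the verbosity API exercised by the tests above (assumes transformers installed):
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_info()
example_logger = hf_logging.get_logger(__name__)
example_logger.info("visible because the transformers root logger is now at INFO")
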
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
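
# Example invocation of the converter above (script name and paths are hypothetical):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./converted_vae
#
# The dump path then contains a diffusers-format AutoencoderKL (config.json plus weights)
# that can be reloaded with AutoencoderKL.from_pretrained("./converted_vae").
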
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
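
# Usage sketch for the watermarker above (requires the invisible-watermark package;
# batch shape and value range below are assumptions for illustration):
#
#   watermarker = StableDiffusionXLWatermarker()
#   batch = torch.rand(2, 3, 512, 512) * 2 - 1        # image batch in [-1, 1], NCHW
#   watermarked = watermarker.apply_watermark(batch)  # same shape; images narrower than 256 px pass through
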
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)

    yield CI_HUB_USER_TOKEN

    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
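
# Usage sketch (hypothetical test module): pytest injects the repo_id string returned by a
# fixture above, while ci_hub_config/ci_hfh_hf_hub_url redirect datasets to the CI hub endpoint.
#
#   def test_load_private_text_data(hf_private_dataset_repo_txt_data, hf_token):
#       from datasets import load_dataset
#
#       dataset = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
#       assert len(dataset["train"]) > 0
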
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
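
# Usage sketch: running one of the tests above directly (file path assumed; @slow tests in
# transformers are gated behind the RUN_SLOW environment variable):
#
#   RUN_SLOW=1 pytest tests/extended/test_trainer_ext.py -k test_run_seq2seq_bnb
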
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_in_hook(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))

        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))

        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
UpperCamelCase__ : List[str] = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
UpperCamelCase__ : Optional[Any] = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCamelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : List[Any] = torch.randn(2 , 3 )
UpperCamelCase__ : str = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
UpperCamelCase__ : Any = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase__ : str = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase__ )
UpperCamelCase__ : int = torch.randn(2 , 3 )
UpperCamelCase__ : Tuple = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , offload_buffers=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : int = torch.randn(2 , 3 )
UpperCamelCase__ : str = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
UpperCamelCase__ : List[str] = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
UpperCamelCase__ : List[str] = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , lowerCamelCase__ )
UpperCamelCase__ : List[str] = torch.randn(2 , 3 )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCamelCase__ , execution_device=lowerCamelCase__ , offload=lowerCamelCase__ , weights_map=model.state_dict() , offload_buffers=lowerCamelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
UpperCamelCase__ : Union[str, Any] = torch.randn(2 , 3 )
UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ )
self.assertEqual(output.device , lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
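# Hedged sketch of the hook mechanics exercised by the tests above, using the
# public `accelerate.hooks` API; the toy layer and hook class are illustrative.
if __name__ == "__main__":
    import torch
    from torch import nn
    from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

    class PlusOneHook(ModelHook):
        def post_forward(self, module, output):
            return output + 1  # applied to every forward output

    layer = nn.Linear(3, 3)
    add_hook_to_module(layer, PlusOneHook())   # wraps layer.forward
    print(layer(torch.randn(2, 3)))            # each output element is shifted by 1
    remove_hook_from_module(layer)             # restores the original forward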
| 106 | 1 |
from heapq import heappop, heappush
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
A , A : Tuple = grid.shape
A : List[str] = [-1, 1, 0, 0]
A : int = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
A , A : Any = [(0, source)], set()
A : List[Any] = np.full((rows, cols) , np.inf )
A : Tuple = 0
A : List[str] = np.empty((rows, cols) , dtype=_lowerCAmelCase )
A : Any = None
while queue:
((A) , (A)) : Optional[int] = heappop(_lowerCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
A : str = []
while (x, y) != source:
path.append((x, y) )
A , A : str = predecessors[x, y]
path.append(_lowerCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowerCAmelCase ) ):
A , A : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
A : Any = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowerCAmelCase , (dist + 1, (nx, ny)) )
A : List[str] = dist + 1
A : str = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
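# Hedged usage sketch for the grid search above. The original identifiers were
# mangled, so `grid_dijkstra` below is a stand-in name for the routine with the
# intended signature (grid, source, destination, allow_diagonal):
#
#     grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#     dist, path = grid_dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # dist == 4.0; path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]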
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :float = 0 ) -> None:
'''simple docstring'''
snake_case_, snake_case_ : str = row, column
snake_case_ : Dict = [[default_value for c in range(lowerCAmelCase__ )] for r in range(lowerCAmelCase__ )]
def __str__( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : str = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
snake_case_ : Any = 0
for row_vector in self.array:
for obj in row_vector:
snake_case_ : Dict = max(lowerCAmelCase__ , len(str(lowerCAmelCase__ ) ) )
snake_case_ : str = F'''%{max_element_length}s'''
# Make string and return
def single_line(lowerCAmelCase__ :list[float] ) -> str:
nonlocal string_format_identifier
snake_case_ : Tuple = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self :Tuple ) -> str:
'''simple docstring'''
return str(self )
def _A ( self :List[Any] , lowerCAmelCase__ :tuple[int, int] ) -> bool:
'''simple docstring'''
if not (isinstance(lowerCAmelCase__ , (list, tuple) ) and len(lowerCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self :Optional[Any] , lowerCAmelCase__ :tuple[int, int] ) -> Any:
'''simple docstring'''
assert self.validate_indicies(lowerCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self :Tuple , lowerCAmelCase__ :tuple[int, int] , lowerCAmelCase__ :float ) -> None:
'''simple docstring'''
assert self.validate_indicies(lowerCAmelCase__ )
snake_case_ : Any = value
def __add__( self :Dict , lowerCAmelCase__ :Matrix ) -> Matrix:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
snake_case_ : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Optional[int] = self[r, c] + another[r, c]
return result
def __neg__( self :int ) -> Matrix:
'''simple docstring'''
snake_case_ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Any = -self[r, c]
return result
def __sub__( self :Dict , lowerCAmelCase__ :Matrix ) -> Matrix:
'''simple docstring'''
return self + (-another)
def __mul__( self :Optional[int] , lowerCAmelCase__ :int | float | Matrix ) -> Matrix:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , (int, float) ): # Scalar multiplication
snake_case_ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Dict = self[r, c] * another
return result
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
snake_case_ : Any = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
snake_case_ : List[Any] = F'''Unsupported type given for another ({type(lowerCAmelCase__ )})'''
raise TypeError(lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Matrix:
'''simple docstring'''
snake_case_ : str = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Union[str, Any] = self[r, c]
return result
def _A ( self :Dict , lowerCAmelCase__ :Matrix , lowerCAmelCase__ :Matrix ) -> Any:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
snake_case_ : Dict = v.transpose()
snake_case_ : int = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
        ainv = Matrix(3 ,3 ,0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 ,1 ,0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 ,1 ,0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u ,v )}''' )
def __UpperCAmelCase ( )-> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
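# Hedged numeric cross-check of the Sherman-Morrison identity used above:
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# valid whenever the denominator is non-zero. numpy is used only for the check.
if __name__ == "__main__":
    import numpy as np

    A_inv = np.eye(3)  # A = I, so A^(-1) = I
    u = np.array([[1.0], [2.0], [-3.0]])
    v = np.array([[4.0], [-2.0], [5.0]])
    denom = 1.0 + (v.T @ A_inv @ u).item()  # 1 + (4 - 4 - 15) = -14
    updated = A_inv - (A_inv @ u @ v.T @ A_inv) / denom
    assert np.allclose(updated, np.linalg.inv(np.eye(3) + u @ v.T))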
| 656 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
snake_case : Dict = '''scheduler_config.json'''
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = 1
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Any = 4
UpperCAmelCase__ : List[Any] = 5
UpperCAmelCase__ : Tuple = 6
UpperCAmelCase__ : str = 7
UpperCAmelCase__ : Optional[int] = 8
UpperCAmelCase__ : Tuple = 9
UpperCAmelCase__ : str = 1_0
UpperCAmelCase__ : Any = 1_1
UpperCAmelCase__ : List[Any] = 1_2
UpperCAmelCase__ : int = 1_3
UpperCAmelCase__ : List[Any] = 1_4
@dataclass
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : torch.FloatTensor
class snake_case_ :
UpperCAmelCase__ : str = SCHEDULER_CONFIG_NAME
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Tuple = True
@classmethod
def lowerCamelCase__( cls :int ,__snake_case :Dict[str, Any] = None ,__snake_case :Optional[str] = None ,__snake_case :Optional[int]=False ,**__snake_case :Union[str, Any] ,) -> Optional[int]:
a__ , a__ , a__ = cls.load_config(
pretrained_model_name_or_path=__snake_case ,subfolder=__snake_case ,return_unused_kwargs=__snake_case ,return_commit_hash=__snake_case ,**__snake_case ,)
return cls.from_config(__snake_case ,return_unused_kwargs=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, os.PathLike] ,__snake_case :bool = False ,**__snake_case :Optional[int] ) -> Union[str, Any]:
self.save_config(save_directory=__snake_case ,push_to_hub=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
return self._get_compatibles()
@classmethod
def lowerCamelCase__( cls :List[Any] ) -> str:
a__ = list(set([cls.__name__] + cls._compatibles ) )
a__ = importlib.import_module(__name__.split('.' )[0] )
a__ = [
getattr(__snake_case ,__snake_case ) for c in compatible_classes_str if hasattr(__snake_case ,__snake_case )
]
return compatible_classes
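# Hedged usage note for the mixin above, following the usual diffusers pattern
# (the concrete scheduler class is illustrative):
#
#     from diffusers import DDPMScheduler
#     scheduler = DDPMScheduler.from_pretrained("some/repo", subfolder="scheduler")
#     scheduler.save_pretrained("./scheduler-out")  # writes scheduler_config.json
#     scheduler.compatibles                         # schedulers sharing this config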
| 335 |
from math import factorial
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
a__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
a__ = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
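# Worked check of the call above: P(X = 2) with n = 4 trials and p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.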
| 335 | 1 |
import requests
UpperCAmelCase__ = "" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = "https://api.openweathermap.org/data/2.5/"
def A ( _UpperCAmelCase : str = "Chicago" , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def A ( _UpperCAmelCase : str = "Kolkata, India" , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def A ( _UpperCAmelCase : float = 55.68 , _UpperCAmelCase : float = 12.57 , _UpperCAmelCase : str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
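# Note: `params=locals()` forwards the function arguments as query parameters,
# so current_weather("Chicago") issues a request equivalent to
# {URL_BASE}weather?q=Chicago&appid=<your key> (a valid key must be set above).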
| 639 |
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639 | 1 |
def UpperCAmelCase_ ( snake_case__ ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = len(snake_case__ )
lowerCAmelCase__ = len(matrix[0] )
lowerCAmelCase__ = min(snake_case__ , snake_case__ )
for row in range(snake_case__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , snake_case__ ):
lowerCAmelCase__ = matrix[col][row] / matrix[row][row]
for i in range(snake_case__ , snake_case__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCAmelCase__ = True
for i in range(row + 1 , snake_case__ ):
if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
lowerCAmelCase__ = False
break
if reduce:
rank -= 1
for i in range(snake_case__ ):
                    matrix[i][row] = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
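# Hedged usage sketch (the function name above was mangled; `rank_of_matrix`
# is a stand-in for it):
#
#     rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]])
#     # -> 2, because the second row is twice the first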
| 193 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = """src/diffusers"""
lowercase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = object_name.split('''.''' )
__SCREAMING_SNAKE_CASE : str = 0
# First let's find the module where our object lives.
__SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ):
i += 1
if i < len(snake_case ):
__SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] )
if i >= len(snake_case ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
# Now let's find the class / func in the code!
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__SCREAMING_SNAKE_CASE : List[Any] = line_index
while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index]
return "".join(snake_case )
lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
lowercase_ = re.compile(R"""<FILL\s+[^>]*>""")
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = code.split('''\n''' )
__SCREAMING_SNAKE_CASE : Dict = 0
while idx < len(snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(snake_case ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0
if has_indent:
__SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}'''
__SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : List[str] = f.readlines()
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(snake_case ):
__SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups()
__SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case )
__SCREAMING_SNAKE_CASE : str = get_indent(snake_case )
__SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
__SCREAMING_SNAKE_CASE : Dict = theoretical_indent
__SCREAMING_SNAKE_CASE : Optional[int] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__SCREAMING_SNAKE_CASE : List[Any] = True
while line_index < len(snake_case ) and should_continue:
line_index += 1
if line_index >= len(snake_case ):
break
__SCREAMING_SNAKE_CASE : Any = lines[line_index]
__SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index]
__SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case )
# Remove any nested `Copied from` comments to avoid circular copies
__SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None]
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case )
# Before comparing, use the `replace_pattern` on the original code.
if len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups()
__SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case )
if option.strip() == "all-casing":
__SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code )
__SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
__SCREAMING_SNAKE_CASE : str = start_index + 1
if overwrite and len(snake_case ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case )
return diffs
def a__ ( snake_case = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = []
for filename in all_files:
__SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
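# Hedged illustration of the marker this checker parses (doctest-style):
#
#     >>> m = _re_copy_warning.search(
#     ...     "# Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->UNet3D"
#     ... )
#     >>> m.group(2), m.group(3)
#     ('models.unet_2d.UNet2DModel', 'with UNet2D->UNet3D')
#
# group(2) is looked up in the diffusers source tree and group(3) carries the
# optional "old->new" replacement directive consumed by _re_replace_pattern.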
| 74 | 0 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1e-12 ) ->List[Any]:
lowercase_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(SCREAMING_SNAKE_CASE_ , axis=1 ) , a_min=SCREAMING_SNAKE_CASE_ ) ).T
lowercase_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(SCREAMING_SNAKE_CASE_ , axis=1 ) , a_min=SCREAMING_SNAKE_CASE_ ) ).T
return jnp.matmul(SCREAMING_SNAKE_CASE_ , norm_emb_a.T )
class _a ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = FlaxCLIPVisionModule(self.config.vision_config )
lowercase_ = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype )
lowercase_ = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
lowercase_ = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowercase_ = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
lowercase_ = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__( self : str , lowercase_ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.vision_model(lowercase_ )[1]
lowercase_ = self.visual_projection(lowercase_ )
lowercase_ = jax_cosine_distance(lowercase_ , self.special_care_embeds )
lowercase_ = jax_cosine_distance(lowercase_ , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowercase_ = 0.0
lowercase_ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowercase_ = jnp.round(lowercase_ , 3 )
lowercase_ = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_ )
# Use a lower threshold if an image has any special care concept
lowercase_ = is_special_care * 0.0_1
lowercase_ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowercase_ = jnp.round(lowercase_ , 3 )
lowercase_ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _a ( __a ):
"""simple docstring"""
A_ = CLIPConfig
A_ = '''clip_input'''
A_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Optional[int] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
if input_shape is None:
lowercase_ = (1, 224, 224, 3)
lowercase_ = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_ )
super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : jax.random.KeyArray , lowercase_ : Tuple , lowercase_ : FrozenDict = None ):
'''simple docstring'''
lowercase_ = jax.random.normal(lowercase_ , lowercase_ )
lowercase_ , lowercase_ = jax.random.split(lowercase_ )
lowercase_ = {"""params""": params_rng, """dropout""": dropout_rng}
lowercase_ = self.module.init(lowercase_ , lowercase_ )["""params"""]
return random_params
def __call__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : dict = None , ):
'''simple docstring'''
lowercase_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa ) , rngs={} , )
| 603 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__snake_case = """
import os
"""
__snake_case = """
def foo():
import os
return False
"""
__snake_case = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
__snake_case = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
__snake_case = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->int:
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , """test_file.py""" )
with open(SCREAMING_SNAKE_CASE_ , """w""" ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE_ )
lowercase_ = get_imports(SCREAMING_SNAKE_CASE_ )
assert parsed_imports == ["os"]
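# The expected result is ["os"] in every case above: get_imports keeps
# top-level imports but ignores imports made inside functions and imports
# guarded by try/except (treated as optional dependencies).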
| 603 | 1 |
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase_ : Union[str, Any] = [True] * 1_0_0_0_0_0_1
UpperCAmelCase_ : Optional[Any] = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
UpperCAmelCase_ : Any = False
i += 1
def _UpperCamelCase (_lowerCamelCase : int )-> bool:
'''simple docstring'''
return seive[n]
def _UpperCamelCase (_lowerCamelCase : int )-> bool:
'''simple docstring'''
return any(digit in '''02468''' for digit in str(_lowerCamelCase ) )
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> list[int]:
'''simple docstring'''
__snake_case = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_lowerCamelCase ) and not contains_an_even_digit(_lowerCamelCase ):
__snake_case = str(_lowerCamelCase )
__snake_case = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCamelCase ) )]
if all(is_prime(_lowerCamelCase ) for i in list_nums ):
result.append(_lowerCamelCase )
return result
def _UpperCamelCase ()-> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
| 24 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0 ) -> Dict:
UpperCAmelCase_ : int = 1.0 if scale is None else scale
UpperCAmelCase_ : Any = 0.0 if loc is None else loc
super().__init__(_SCREAMING_SNAKE_CASE ,[AffineTransform(loc=self.loc ,scale=self.scale ,event_dim=_SCREAMING_SNAKE_CASE )] )
@property
def a__ ( self ) -> str:
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self ) -> Dict:
return self.base_dist.variance * self.scale**2
@property
def a__ ( self ) -> Tuple:
return self.variance.sqrt()
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = args_dim
UpperCAmelCase_ : Any = nn.ModuleList([nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
UpperCAmelCase_ : int = domain_map
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tuple[torch.Tensor]:
UpperCAmelCase_ : int = [proj(_SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*_SCREAMING_SNAKE_CASE )
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
super().__init__()
UpperCAmelCase_ : int = function
def a__ ( self ,_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return self.function(_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE )
class __a:
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
def __init__( self ,_SCREAMING_SNAKE_CASE = 1 ) -> None:
UpperCAmelCase_ : Optional[Any] = dim
UpperCAmelCase_ : List[Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
if self.dim == 1:
return self.distribution_class(*_SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*_SCREAMING_SNAKE_CASE ) ,1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> Distribution:
UpperCAmelCase_ : Dict = self._base_distribution(_SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_SCREAMING_SNAKE_CASE ,loc=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,event_dim=self.event_dim )
@property
def a__ ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self ) -> int:
return len(self.event_shape )
@property
def a__ ( self ) -> float:
return 0.0
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> nn.Module:
return ParameterProjection(
in_features=_SCREAMING_SNAKE_CASE ,args_dim=self.args_dim ,domain_map=LambdaLayer(self.domain_map ) ,)
def a__ ( self ,*_SCREAMING_SNAKE_CASE ) -> List[Any]:
raise NotImplementedError()
@staticmethod
def a__ ( _SCREAMING_SNAKE_CASE ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(_SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase = StudentT
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Any = cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ : Optional[int] = 2.0 + cls.squareplus(_SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = {"loc": 1, "scale": 1}
lowerCAmelCase = Normal
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = cls.squareplus(_SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
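# Hedged usage note: class and method names above were mangled; in the
# transformers time-series API these heads are e.g. NormalOutput(dim=1), whose
# parameter projection maps hidden states to (loc, scale) and whose
# distribution(...) call builds the (optionally affine-rescaled) torch
# distribution used for likelihood training.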
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = {"total_count": 1, "logits": 1}
lowerCAmelCase = NegativeBinomial
@classmethod
def a__ ( cls ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Any = cls.squareplus(_SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Distribution:
UpperCAmelCase_, UpperCAmelCase_ : int = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_SCREAMING_SNAKE_CASE ,logits=_SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=_SCREAMING_SNAKE_CASE ,logits=_SCREAMING_SNAKE_CASE ) ,1 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ) -> Distribution:
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 300 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    __a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 300 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : List[Any] = logging.get_logger(__name__)
A : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
A : str = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE_ = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE_ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ = name.split(__UpperCamelCase )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_ = mapped_key.replace("*" , __UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ = "weight"
else:
SCREAMING_SNAKE_CASE_ = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
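# Note on the fairseq naming scheme assumed by load_conv_layer: conv parameters arrive
# as "conv_layers.<layer_id>.<type_id>.(weight|bias)", where type_id 0 is the conv
# module itself and type_id 2 is its group/layer norm.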
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    # NOTE: mirrors the upstream script, which overwrites dict_path before deriving fairseq's data dir
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
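    # Example invocation (hypothetical paths; the flags match the argparse setup above):
    # python convert_unispeech_sat_checkpoint.py \
    #     --checkpoint_path /path/to/unispeech_sat.pt \
    #     --dict_path /path/to/dict.ltr.txt \
    #     --config_path /path/to/config.json \
    #     --pytorch_dump_folder_path ./unispeech-sat-hf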
| 140 | def hexagonal_numbers(length: int) -> list[int]:
    # Returns the first `length` hexagonal numbers, h(n) = n * (2n - 1).
    if not isinstance(length , int ) or length <= 0:
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
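    # Expected output of the two calls above, h(n) = n * (2n - 1) for n = 0..length-1:
    # [0, 1, 6, 15, 28]
    # [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]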
| 140 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=128 , UpperCamelCase_=32 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :List[str] = parent
UpperCamelCase__ :List[str] = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Tuple = is_training
UpperCamelCase__ :Union[str, Any] = use_input_mask
UpperCamelCase__ :List[str] = use_token_type_ids
UpperCamelCase__ :Optional[int] = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Any = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :Dict = intermediate_size
UpperCamelCase__ :int = hidden_act
UpperCamelCase__ :Optional[Any] = hidden_dropout_prob
UpperCamelCase__ :Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ :Any = max_position_embeddings
UpperCamelCase__ :Any = type_vocab_size
UpperCamelCase__ :Any = type_sequence_label_size
UpperCamelCase__ :Union[str, Any] = initializer_range
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :List[Any] = num_choices
UpperCamelCase__ :Any = scope
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = NezhaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :int = NezhaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[str] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
UpperCamelCase__ :Tuple = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = NezhaForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Any = NezhaForNextSentencePrediction(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = NezhaForPreTraining(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = NezhaForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :int = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.num_labels
UpperCamelCase__ :Dict = NezhaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :List[Any] = NezhaForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.num_choices
UpperCamelCase__ :List[str] = NezhaForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase__ :List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ :Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''bert.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''bert.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) ) | 702 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self , trainer , n_trials , direction , **kwargs ):
        raise NotImplementedError
    def default_hp_space(self , trial ):
        raise NotImplementedError
    def ensure_available(self ):
        if not self.is_available():
            raise RuntimeError(
                F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install(cls ):
        return F'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase ):
    name = 'optuna'
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = 'ray'
    pip_package = '\'ray[tune]\''
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    name = 'sigopt'
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    name = 'wandb'
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self , trainer , n_trials , direction , **kwargs ):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space(self , trial ):
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
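# A minimal usage sketch (hypothetical; Trainer wiring omitted) of the registry above,
# together with default_hp_search_backend() defined just below:
# backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]
# backend_cls().ensure_available()  # raises with a pip hint if the package is missing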
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) ) | 280 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int ) -> list[int]:
    # Sieve of Eratosthenes: collect all primes strictly below max_number.
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(max_number: int = 1_0**8 ) -> int:
    # Count semiprimes p * q < max_number with a two-pointer scan over the primes.
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
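# Why `right - left + 1` pairs are counted per step: after the inner loop, every prime
# q in prime_numbers[left..right] satisfies prime_numbers[left] * q < max_number, and
# keeping left <= right means each semiprime p * q (p <= q) is counted exactly once.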
if __name__ == "__main__":
print(F"""{solution() = }""")
| 195 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict ) -> int:
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards(num_shards: int , max_num_jobs: int ) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
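# Example of the distribution above: _distribute_shards(num_shards=5, max_num_jobs=2)
# returns [range(0, 3), range(3, 5)]; earlier groups absorb the remainder shards.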
def _split_gen_kwargs(gen_kwargs: dict , max_num_jobs: int ) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
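# Example of the split above (hypothetical gen_kwargs): list values are sharded while
# scalar values are copied into every group:
# _split_gen_kwargs({"files": ["a", "b", "c"], "sep": ","}, max_num_jobs=2)
# -> [{"files": ["a", "b"], "sep": ","}, {"files": ["c"], "sep": ","}]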
def _merge_gen_kwargs(gen_kwargs_list: List[dict] ) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator , gen_kwargs: dict ) -> dict:
    # Shuffle lists of the same length with the same indices so they stay aligned.
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 195 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self , text , **kwargs ):
        # Always pad to max_length so the per-candidate encodings can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair" , None )
        return_tensors = kwargs.pop("return_tensors" , None )
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get("input_ids" )
            encoded_attention_mask = encoded_candidates.get("attention_mask" )
            encoded_token_type_ids = encoded_candidates.get("token_type_ids" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
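# A short usage sketch (hypothetical model id and inputs): every candidate is padded
# to the same max_length so the per-question encodings can be stacked into one batch.
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates([["candidate 1", "candidate 2"]], max_length=10, return_tensors="pt")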
| 552 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
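# Design note: the TYPE_CHECKING / _LazyModule split keeps `import transformers` cheap:
# the torch and TF submodules are only imported when one of their attributes is first
# accessed through the lazy module installed in sys.modules.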
| 552 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 552 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Graph whose edges carry the transition probabilities of a Markov chain.
    """
    def __init__( self ):
        self.connections: dict[str, dict[str, float]] = {}
    def add_node( self , node: str ) -> None:
        self.connections[node] = {}
    def add_transition_probability( self , node1: str , node2: str , probability: float ) -> None:
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes( self ) -> list[str]:
        return list(self.connections )
    def transition( self , node: str ) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
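    # Example (hypothetical two-state chain): over many steps the visit counts
    # approximate the chain's stationary distribution.
    # transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    # print(get_transitions("a", transitions, 1000))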
| 282 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int , sigma: int , theta: int , lambd: int , gamma: int , psi: int ) -> np.ndarray:
    """
    Build a (ksize x ksize) real Gabor kernel for the given orientation and scale.
    """
    # prepare the kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
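# The kernel above implements the real-valued Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') is the (px, py) offset rotated by theta.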
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
UpperCAmelCase__ =imread("../image_data/lena.jpg")
# turn image in gray scale value
UpperCAmelCase__ =cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
UpperCAmelCase__ =np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
UpperCAmelCase__ =gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
UpperCAmelCase__ =out / out.max() * 255
UpperCAmelCase__ =out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 721 |
"""simple docstring"""
import inspect
import unittest
class DependencyTester(unittest.TestCase ):
    def test_diffusers_import(self ):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self ):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = """k-diffusion"""
                    elif backend == "invisible_watermark":
                        backend = """invisible-watermark"""
                    assert backend in deps, F'''{backend} is not in the deps table!'''
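# Note: classes defined in `dummy_*` modules are import-time placeholders for optional
# backends; every name in their `_backends` list must resolve (after the pip-name
# fix-ups above) to a pinned entry in the deps table.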
| 442 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt , init_image , mask_image )
        # shard inputs and rng: replicate copies the params to every device, shard
        # splits each batch across devices for the pmap-style (jit=True) call below
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids , processed_masks , processed_masked_images , params , prng_seed , num_inference_steps , jit=True )
        images = output.images.reshape(num_samples , 512 , 512 , 3 )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] )
        print(F"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 162 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig ):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        return self.d_model
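# Example (sketch) of the attribute indirection above: generic code can read
# `hidden_size` while the stored field is `d_model`.
# config = TableTransformerConfig()
# assert config.hidden_size == config.d_model == 256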
class TableTransformerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self ) -> int:
        return 12 | 162 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
def __snake_case ( self : Optional[int] ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def __snake_case ( self : Dict ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __snake_case ( self : Optional[Any] , a__ : str , a__ : Optional[int] , a__ : Union[str, Any] ):
UpperCAmelCase = BitModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : int , a__ : List[Any] , a__ : Optional[Any] , a__ : Tuple ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = BitForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Dict , a__ : Optional[int] , a__ : str , a__ : int ):
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase = None
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def __snake_case ( self : List[str] ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , hidden_size=37 )
def __snake_case ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Optional[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __snake_case ( self : List[str] ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : List[Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __snake_case ( self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __snake_case ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def __snake_case ( self : int ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __snake_case ( self : int ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __snake_case ( self : int ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self : Dict ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**a__ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
UpperCAmelCase = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(BitBackbone,) if is_torch_available() else ()
_lowerCamelCase =BitConfig
_lowerCamelCase =False
def __snake_case ( self : Dict ):
UpperCAmelCase = BitModelTester(self )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ :Optional[int] = logging.get_logger(__name__)
class snake_case ( __a ):
'''simple docstring'''
_A : Dict = ['pixel_values']
def __init__( self : Union[str, Any] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase_ )
__UpperCAmelCase : Dict = size if size is not None else {'''shortest_edge''': 256}
__UpperCAmelCase : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
__UpperCAmelCase : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ )
__UpperCAmelCase : Optional[Any] = do_resize
__UpperCAmelCase : int = size
__UpperCAmelCase : Union[str, Any] = resample
__UpperCAmelCase : Optional[int] = do_center_crop
__UpperCAmelCase : Tuple = crop_size
__UpperCAmelCase : List[Any] = do_rescale
__UpperCAmelCase : Tuple = rescale_factor
__UpperCAmelCase : Optional[int] = do_normalize
__UpperCAmelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : List[str] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__UpperCAmelCase : int = get_resize_output_image_size(lowercase_ , size=size['''shortest_edge'''] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Union[str, Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = get_size_dict(lowercase_ )
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any ):
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Any , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : str , ):
'''simple docstring'''
__UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
__UpperCAmelCase : Dict = get_size_dict(lowercase_ , default_to_square=lowercase_ )
__UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
__UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : Any = get_size_dict(lowercase_ )
__UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : int = image_std if image_std is not None else self.image_std
__UpperCAmelCase : str = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase : str = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
__UpperCAmelCase : Optional[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
__UpperCAmelCase : Optional[Any] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
__UpperCAmelCase : List[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
__UpperCAmelCase : str = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
__UpperCAmelCase : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
__UpperCAmelCase : List[str] = {'''pixel_values''': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) | 522 | '''simple docstring'''
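# Usage sketch (editorial, hedged): this module uses package-relative imports,
# so it only runs inside a package providing `...image_processing_utils` and
# friends. Given a PIL image, the default pipeline is: resize so the shortest
# edge is 256, center-crop to 224x224, rescale by 1/255, then normalize with
# ImageNet mean/std.
#
#   processor = SimpleImageProcessor()
#   batch = processor.preprocess(pil_image)           # or a list of images
#   batch["pixel_values"][0].shape                    # -> (3, 224, 224)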
"""
This is a type of divide and conquer algorithm which divides the search space into
3 parts and finds the target value based on the property of the array or list
(usually a monotonic property).

Time Complexity  : O(log3 N)
Space Complexity : O(1)
"""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
# Minimum cut on a flow network, via the Ford-Fulkerson method.

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if the sink `t` is reachable from the source `s`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """
    >>> mincut(test_graph, source=0, sink=5)
    [(1, 3), (4, 3), (4, 5)]
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
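# Note (editorial): `bfs` always augments along a shortest path, so this is the
# Edmonds-Karp variant of Ford-Fulkerson. After the flow saturates, the edges
# whose residual capacity dropped to zero (but had capacity in `temp`) are
# reported; for `test_graph` above this prints [(1, 3), (4, 3), (4, 5)], which
# here coincides with a minimum s-t cut.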
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, assuming `do_resize` is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Check that Prim's algorithm recovers a known minimum spanning tree.

    A runnable entry point follows after this function.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
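if __name__ == "__main__":
    # Minimal stand-alone runner (editorial sketch): this assumes the
    # repository layout required by the `graphs.` import above; normally the
    # test would be collected by pytest instead.
    test_prim_successful_result()
    print("Prim MST test passed.")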
"""
Project Euler problem 50: find the prime below one million that can be written
as the sum of the most consecutive primes.
"""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
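# Typical invocations (editorial sketch; the script name is whatever this file
# is saved as, and `accelerate config` should be run once beforehand for the
# distributed case):
#
#   python this_script.py --mixed_precision fp16      # single CPU/GPU
#   accelerate launch this_script.py                  # multi-GPU / TPU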
"""Feature extractor class for DeiT."""

import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
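# Migration sketch (editorial): the deprecated class still works but warns;
# new code should use the image processor directly. The checkpoint name below
# is an assumed example, not taken from this file.
#
#   from transformers import ChineseCLIPImageProcessor
#   processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")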
"""Hash map with open addressing."""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Get next index; implements linear open addressing."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket. If the bucket is empty or holds the
        same key, insert and return True; otherwise the caller must probe the
        next bucket.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """Return True once we have reached the safe capacity."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """Return True if we could use half as many buckets as we have now."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
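if __name__ == "__main__":
    # Minimal usage sketch (editorial): the map behaves like a dict and
    # resizes itself as items are added and removed.
    hm: HashMap[str, int] = HashMap(initial_block_size=4)
    hm["one"] = 1
    hm["two"] = 2
    hm["one"] = 11  # overwriting an existing key keeps the length at 2
    assert len(hm) == 2 and hm["one"] == 11
    del hm["two"]
    assert "two" not in hm  # MutableMapping supplies __contains__ for free
    print(hm)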
"""simple docstring"""
import argparse
import json
import subprocess
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
UpperCAmelCase = subprocess.run(lowerCAmelCase , shell=lowerCAmelCase , stdout=subprocess.PIPE )
UpperCAmelCase = output.stdout.decode("""utf-8""" )
UpperCAmelCase = json.loads(lowerCAmelCase )
UpperCAmelCase = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(lowerCAmelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) )
if len(lowerCAmelCase ) > 0:
UpperCAmelCase = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return values.split(""",""" )
lowerCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
lowerCAmelCase_ : str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
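# Example invocation (editorial sketch; supply a real GitHub token with
# actions:read permission, and use whatever name this file is saved under):
#
#   python check_runner_status.py --target_runners runner-1,runner-2 --token <TOKEN>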
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
"""Tokenization class for Funnel Transformer."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" Funnel Transformer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
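# Usage sketch (editorial): unlike BERT, Funnel marks the leading <cls> token
# with token type id 2 (see `cls_token_type_id` above). "funnel-transformer/small"
# is one of the checkpoints listed in this file; the call needs network access.
#
#   tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   enc = tok("hello", "world")
#   enc["token_type_ids"]  # [2, 0, ..., 0, 1, ..., 1]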
"""Testing suite for the PyTorch DPT (hybrid) model."""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=2 , A__=32 , A__=16 , A__=3 , A__=True , A__=True , A__=32 , A__=4 , A__=[0, 1, 2, 3] , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.02 , A__=3 , A__=[1, 3_84, 24, 24] , A__=True , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = backbone_out_indices
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = backbone_featmap_shape
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A__ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = DPTModel(config=A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = DPTForDepthEstimation(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(A__ )
model.to(A__ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A__ , labels=A__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = DPTModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def UpperCamelCase ( self ) -> List[str]:
pass
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A__ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def UpperCamelCase ( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = True
if model_class in get_values(A__ ):
continue
_SCREAMING_SNAKE_CASE = model_class(A__ )
model.to(A__ )
model.train()
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ , return_labels=A__ )
_SCREAMING_SNAKE_CASE = model(**A__ ).loss
loss.backward()
def UpperCamelCase ( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
continue
_SCREAMING_SNAKE_CASE = model_class(A__ )
model.to(A__ )
model.gradient_checkpointing_enable()
model.train()
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A__ , A__ , return_labels=A__ )
_SCREAMING_SNAKE_CASE = model(**A__ ).loss
loss.backward()
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = _config_zero_init(A__ )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=A__ )
# Skip the check for the backbone
_SCREAMING_SNAKE_CASE = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self ) -> Any:
pass
@slow
def UpperCamelCase ( self ) -> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_SCREAMING_SNAKE_CASE = DPTModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
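    # The two composition comments above name operations the script never
    # implements. A hedged sketch follows (assumed shapes: r1 is an m x n and
    # r2 an n x p fuzzy relation matrix; these helpers are illustrative
    # additions, not part of the original script).
    def max_min_composition(r1: np.ndarray, r2: np.ndarray) -> np.ndarray:
        # (R1 o R2)[i][j] = max over k of min(R1[i][k], R2[k][j])
        return np.maximum.reduce(np.minimum(r1[:, :, None], r2[None, :, :]), axis=1)

    def max_product_composition(r1: np.ndarray, r2: np.ndarray) -> np.ndarray:
        # (R1 * R2)[i][j] = max over k of R1[i][k] * R2[k][j]
        return np.maximum.reduce(r1[:, :, None] * r2[None, :, :], axis=1)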
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
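
    # A small usage sketch (illustrative addition, not part of the original module):
    stack = Stack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)  # 3->2->1
    assert stack.pop() == 3 and stack.peek() == 2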
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="attention" ) -> str:
'''simple docstring'''
lowerCamelCase__: str = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowerCamelCase__: Any = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowerCamelCase__: Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowerCamelCase__: int = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
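
# For orientation: after flattening, the T5X parameter names this helper (and
# the ones below) index into look like "encoder/layers_0/attention/key/kernel".
# That path is an assumed example for illustration; the exact set of keys
# depends on the checkpoint being converted.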
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the flax parameters to a PyTorch-style state dict.

    Note: the assignment targets were lost in this copy; the keys below are
    reconstructed on the assumption that they follow the standard Hugging Face
    T5 state-dict naming convention.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
_lowercase = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency of an LC circuit: f = 1 / (2π√(LC)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
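
    # Worked example (assumed component values, added for illustration): a
    # 10 mH inductor with a 100 nF capacitor gives
    # f = 1 / (2*pi*sqrt(L*C)) ~= 5.03 kHz.
    print(resonant_frequency(inductance=10e-3, capacitance=100e-9))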
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # load pretrained model
    model = load_gpta('gpt2').to(device)
    print('computing perplexity on objective set')
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print('perplexity on objective set:', orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('gpt2')
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
'''simple docstring'''
lowerCamelCase : int = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
lowerCamelCase : Any = RandomSampler(a_ )
lowerCamelCase : Any = DataLoader(a_, sampler=a_ )
lowerCamelCase : Optional[Any] = max_steps // (len(a_ )) + 1
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : str = torch.zeros((1, context_len), dtype=torch.long, device=a_ )
lowerCamelCase : List[str] = recopy_model(a_, a_, a_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a_ )
secondary_learner.eval()
lowerCamelCase : Optional[int] = []
lowerCamelCase : Any = 0
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Union[str, Any] = []
# Compute the performance of the transformer model at the beginning
lowerCamelCase : Optional[Any] = compute_perplexity(a_, a_, a_ )
test_perps.append(a_ )
print('Test perplexity, step', a_, ':', a_ )
for epoch in range(int(a_ ) ):
for step, example in enumerate(a_ ):
torch.cuda.empty_cache()
lowerCamelCase : List[str] = random.randint(0, example.size(2 ) - context_len - 1 )
lowerCamelCase : Dict = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCamelCase : Optional[Any] = model(a_, labels=a_ )
lowerCamelCase : Optional[Any] = True
if secondary_learner is not None:
lowerCamelCase : Optional[Any] = secondary_learner.forward(
torch.tensor(a_, dtype=torch.long, device=a_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowerCamelCase : str = -1
if predicted_q < threshold:
lowerCamelCase : Tuple = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowerCamelCase : int = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCamelCase : Optional[int] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCamelCase : int = compute_perplexity(a_, a_, a_ )
test_perps.append(a_ )
print('Test perplexity, step', a_, ':', a_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict(), a_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
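

def adaptive_threshold(observed_qs, step, warmup=10):
    """Hedged sketch, not part of the original script: the comment inside
    `finetune` describes decaying the filter's selectivity from one standard
    deviation above the mean of the observed IG values to one below it over
    the first few batches. The function name and the exact linear schedule
    here are assumptions added for illustration."""
    qs = np.array(observed_qs) if observed_qs else np.array([0.0])
    frac = min(step / warmup, 1.0)
    return float(qs.mean() + (1.0 - 2.0 * frac) * qs.std())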
def main():
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir', default=a_, type=a_, required=a_, help='The input data dir. Should contain data files for WikiText.', )
parser.add_argument(
'--model_name_or_path', default=a_, type=a_, required=a_, help='Path to pretrained model or model identifier from huggingface.co/models', )
parser.add_argument(
'--data_file', type=a_, default=a_, help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
), )
parser.add_argument(
'--igf_data_file', type=a_, default=a_, help='A jbl file containing the context and information gain pairs to train secondary learner.', )
parser.add_argument(
'--output_dir', default=a_, type=a_, required=a_, help='The output directory where the final fine-tuned model is stored.', )
parser.add_argument(
'--tokenizer_name', default=a_, type=a_, help='Pretrained tokenizer name or path if not the same as model_name', )
parser.add_argument('--seed', type=a_, default=a_, help='A seed for reproducible training.' )
parser.add_argument(
'--context_len', default=32, type=a_, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--size_objective_set', default=100, type=a_, help='number of articles that are long enough to be used as our objective set', )
parser.add_argument(
'--eval_freq', default=100, type=a_, help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps', default=1000, type=a_, help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size', default=128, type=a_, help='batch size of training data for secondary learner', )
parser.add_argument(
'--batch_size', default=16, type=a_, help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval', default=10, type=a_, help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
), )
parser.add_argument(
'--number', default=100, type=a_, help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len', default=1026, type=a_, help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs', default=15, type=a_, help='number of epochs to train secondary learner' )
parser.add_argument('--trim', default=a_, type=a_, help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold', default=1.0, type=a_, help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
), )
parser.add_argument('--finetuned_model_name', default='gpt2_finetuned.pt', type=a_, help='finetuned_model_name' )
parser.add_argument(
'--recopy_model', default=a_, type=a_, help='Reset the model to the original pretrained GPT-2 weights after each iteration', )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file='data/tokenized_stories_train_wikitext103.jbl', igf_data_file='igf_context_pairs.jbl', )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load('data/IGF_values.jbl')

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path='igf_model.pt', )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained('gpt2')
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file='data/tokenized_stories_train_wikitext103.jbl', number=100, min_len=1026, trim=True )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name='gpt2_finetuned.pt', )
if __name__ == "__main__":
main()
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def block_out_channels_a(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )

        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ) -> Tuple:
lowerCamelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCAmelCase_ )
# create init_image
lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase : Dict = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert('RGB' ).resize((256, 256) )
# create mask
lowerCamelCase : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
lowerCamelCase : Union[str, Any] = 0
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCamelCase : Optional[int] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase : List[str] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : Any = 'cpu'
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : List[str] = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase : Tuple = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : int = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCamelCase : int = output.images
lowerCamelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase : Any = image[0, -3:, -3:, -1]
lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : Dict = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _UpperCamelCase ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa )
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Optional[int] = 'a hat'
lowerCamelCase : Dict = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
lowerCamelCase : Optional[int] = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase , lowerCamelCase : Tuple = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCamelCase : Optional[Any] = pipeline(
UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
lowerCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
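        # assert_mean_pixel_difference (imported above) conceptually checks
        # that the generated and reference uint8 images agree on average,
        # roughly:
        #     np.abs(a.astype(np.float32) - b.astype(np.float32)).mean() < budget
        # (a hedged paraphrase of its behavior, not the library's source).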
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
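

# A quick behavioral sketch of the queue above (illustrative addition, not
# part of the original module):
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 2)
#   pq.put((0, 0), 1)   # re-inserting an item updates its priority
#   assert pq.top_show() == (0, 0) and pq.minkey() == 1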
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W1 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                        close_list_anchor.append(get_s)
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'{solution() = }')
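
    # Sanity check from the Project Euler 72 statement: for a limit of 8
    # there are 21 reduced proper fractions.
    assert solution(8) == 21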
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a_ : Optional[int] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _a (self , __a , __a ):
'''simple docstring'''
lowerCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(__a , {"sequence": ANY(__a ), "labels": [ANY(__a )], "scores": [ANY(__a )]} )
# No kwarg
lowerCamelCase = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(__a , {"sequence": ANY(__a ), "labels": [ANY(__a )], "scores": [ANY(__a )]} )
lowerCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(__a , {"sequence": ANY(__a ), "labels": [ANY(__a )], "scores": [ANY(__a )]} )
lowerCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
__a , {"sequence": ANY(__a ), "labels": [ANY(__a ), ANY(__a )], "scores": [ANY(__a ), ANY(__a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
lowerCamelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
__a , {"sequence": ANY(__a ), "labels": [ANY(__a ), ANY(__a )], "scores": [ANY(__a ), ANY(__a )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
lowerCamelCase = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(__a , {"sequence": ANY(__a ), "labels": [ANY(__a )], "scores": [ANY(__a )]} )
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
__a , [
{"sequence": ANY(__a ), "labels": [ANY(__a ), ANY(__a )], "scores": [ANY(__a ), ANY(__a )]}
for i in range(1 )
] , )
lowerCamelCase = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
__a , [
{"sequence": ANY(__a ), "labels": [ANY(__a ), ANY(__a )], "scores": [ANY(__a ), ANY(__a )]}
for i in range(2 )
] , )
with self.assertRaises(__a ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(__a ):
classifier(__a , candidate_labels="politics" )
with self.assertRaises(__a ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(__a ):
classifier("Who are you voting for in 2020?" , candidate_labels=__a )
with self.assertRaises(__a ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(__a ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=__a , )
self.run_entailment_id(__a )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
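
    # Hedged sketch of the behavior exercised above (a simplified assumption,
    # not the pipeline's actual source): the entailment index is recovered
    # from config.label2id by prefix matching, with -1 as the fallback.
    #
    #   def _entailment_id(label2id):
    #       for label, ind in label2id.items():
    #           if label.lower().startswith("entail"):
    #               return ind
    #       return -1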
@require_torch
def _a (self ):
'''simple docstring'''
lowerCamelCase = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(outputs) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
@require_tf
def _a (self ):
'''simple docstring'''
lowerCamelCase = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
lowerCamelCase = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(__a ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__a , )
self.assertEqual(
            nested_simplify(outputs) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=__a , )
self.assertEqual(
            nested_simplify(outputs) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , ) | 705 |
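# Hedged usage sketch (not part of the test suite above): how the zero-shot
# classification pipeline exercised by these assertions is typically invoked.
# The model id "roberta-large-mnli" and the inputs come from the test; printing
# the top label is an illustrative assumption.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    # `labels` is sorted by descending score, so index 0 is the best guess.
    print(result["labels"][0], round(result["scores"][0], 3))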
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 5_1_2}
class BertGenerationTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab(self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token(self , index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string(self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,) | 484 | 0 |
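# Hedged round-trip sketch for the sentencepiece tokenizer defined above. The hub
# id comes from PRETRAINED_VOCAB_FILES_MAP; the sample sentence is an assumption.
if __name__ == "__main__":
    tokenizer = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder" )
    ids = tokenizer("sequence to sequence generation" )["input_ids"]
    print(ids )
    print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids ) ) )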
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0] | 25 |
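# Hedged usage sketch for the fast tokenizer above: pretokenized input requires
# add_prefix_space=True, mirroring the guard in _batch_encode_plus/_encode_plus.
# The hub id "RUCAIBox/mvp" comes from this file; the sample tokens are assumptions.
if __name__ == "__main__":
    tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" , add_prefix_space=True )
    encoding = tokenizer(["Summarize", "this", "article"] , is_split_into_words=True )
    print(encoding["input_ids"] )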
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params , i , prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def t5x_attention_lookup(params , i , prefix , layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params , i , prefix , split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_1 = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params , i , prefix , layer_name):
    """Returns the layer norm param of a layer."""
    return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
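# Hedged shape sketch for t5x_attention_lookup above: a stacked T5X kernel of
# shape (d_model, num_layers, num_heads, d_head) is sliced at one layer and
# flattened to (d_model, num_heads * d_head). All sizes below are made up.
if __name__ == "__main__":
    _k = np.zeros((8, 4, 2, 3))  # (d_model=8, num_layers=4, num_heads=2, d_head=3)
    _k_layer1 = np.ascontiguousarray(_k[:, 1, :, :]).reshape(8 , 2 * 3)
    assert _k_layer1.shape == (8, 6)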
def convert_t5x_to_pytorch(variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables['''target'''])
    old = {'''/'''.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_attention_layer_norm''')
        k, o, q, v = t5x_attention_lookup(old , i , '''encoder''' , '''attention''')
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_mlp_layer_norm''')
        wi, wo = t5x_mlp_lookup(old , i , '''encoder''' , split_mlp_wi)
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = t5x_relpos_bias_lookup(
                old , i , '''encoder''').T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''encoder''').T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''decoder''').T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_self_attention_layer_norm''')
            k, o, q, v = t5x_attention_lookup(old , i , '''decoder''' , '''self_attention''')
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_cross_attention_layer_norm''')
            k, o, q, v = t5x_attention_lookup(old , i , '''decoder''' , '''encoder_decoder_attention''')
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_mlp_layer_norm''')
            wi, wo = t5x_mlp_lookup(old , i , '''decoder''' , split_mlp_wi)
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = t5x_relpos_bias_lookup(old , i , '''decoder''').T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict(converted_params , is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''')
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted , is_encoder_only)
    model.load_state_dict(state_dict , strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
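    # Hedged example invocation; every path below is a placeholder, not a real
    # checkpoint shipped with this script:
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/pytorch_model \
    #       --scalable_attention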
| 677 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , )-> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self )-> dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False )-> Any:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size['''shortest_edge'''] * height / width )
                expected_width = self.size['''shortest_edge''']
            elif width > height:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * width / height )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase):
    """simple docstring"""
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self )-> None:
        self.image_processor_tester = DetaImageProcessingTester(self )
    @property
    def image_processor_dict( self )-> dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self )-> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
        self.assertTrue(hasattr(image_processing , '''do_pad''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self )-> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
    def test_batch_feature( self )-> None:
        pass
    def test_call_pil( self )-> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self )-> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self )-> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self )-> None:
        # prepare image and target
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self )-> None:
        # prepare image, target and masks_path
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        image_processing = DetaImageProcessor(format='''coco_panoptic''' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
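# Hedged end-to-end sketch of the detection preprocessing exercised above: an
# image plus COCO-style annotations in, padded pixel_values and normalized boxes
# out. The fixture paths exist only inside a transformers repo checkout.
if __name__ == "__main__":
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
        target = {'''image_id''': 3_97_69, '''annotations''': json.loads(f.read() )}
    processor = DetaImageProcessor()
    encoding = processor(images=image , annotations=target , return_tensors='''pt''' )
    print(encoding['''pixel_values'''].shape , encoding['''labels'''][0]['''boxes'''].shape )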
| 341 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch ) -> None:
    '''simple docstring'''
    class MetricMock:
        def __init__(self , metric_id )-> None:
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
        def list_metrics(self )-> list:
            return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ) -> None:
    '''simple docstring'''
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
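# Hedged, self-contained illustration of the pytest.warns pattern used above;
# the helper and its warning text are assumptions, not part of `datasets`.
def _emit_deprecation() -> None:
    import warnings
    warnings.warn("deprecated; see https://huggingface.co/docs/evaluate" , FutureWarning )
def test_pytest_warns_pattern() -> None:
    with pytest.warns(FutureWarning , match="huggingface.co/docs/evaluate" ):
        _emit_deprecation()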
| 341 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a , b ):
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x , clusters ):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
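# Hedged sanity check for the helpers above: the expansion
# ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 must match a direct computation, and
# color_quantize must return the index of the nearest cluster. Data is random.
if __name__ == "__main__":
    _rng = np.random.default_rng(0 )
    _a, _b = _rng.random((5, 3) ), _rng.random((4, 3) )
    _direct = ((_a[:, None, :] - _b[None, :, :]) ** 2).sum(-1 )
    assert np.allclose(squared_euclidean_distance(_a , _b ) , _direct )
    assert (color_quantize(_a , _b ) == _direct.argmin(axis=1 )).all()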
class ImageGPTImageProcessor( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_normalize: bool = True , do_color_quantize: bool = True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 256, '''width''': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image , size=(size['''height'''], size['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
        """simple docstring"""
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_normalize: bool = None , do_color_quantize: Optional[bool] = None , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''input_ids''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
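# Hedged usage sketch for the processor above: color-quantize one random image
# into palette ids. The 8 random clusters stand in for ImageGPT's real 512-color
# palette; the image and sizes below are assumptions.
if __name__ == "__main__":
    _rng = np.random.default_rng(1 )
    _clusters = (_rng.random((8, 3) ) * 2 - 1).tolist()  # values in [-1, 1], like the palette
    _processor = ImageGPTImageProcessor(clusters=_clusters , size={'''height''': 32, '''width''': 32} )
    _image = (_rng.random((64, 64, 3) ) * 255).astype("uint8" )
    _out = _processor(images=_image )
    print(len(_out["input_ids"][0] ) )  # 32 * 32 palette indices per image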
| 227 |
"""simple docstring"""
def solution(n: int = 100 ) -> int:
    """Counts the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
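    # Hedged self-check: 2**4 == 4**2 is the only collision below 5, and 9183 is
    # the well-known Project Euler 29 count for n = 100.
    assert solution(5 ) == 15
    assert solution(100 ) == 9183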
| 227 | 1 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint ) -> None:
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver ), version.parse(want_ver ) ):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def require_version(requirement: str, hint: Optional[str] = None ) -> None:
    """simple docstring"""
    hint = F'''\n{hint}''' if hint is not None else ''
    # non-versioned check
    if re.match(R'^[\w_\-\d]+$', requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement )
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                F''' got {requirement}''' )
        pkg , want_full = match[0]
        want_range = want_full.split(',' )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'^([\s!=<>]{1,2})(.+)', w )
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    F''' but got {requirement}''' )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint )
def require_version_core(requirement ) -> None:
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement, hint )
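# Hedged usage sketch for require_version(); the requirement strings below are
# illustrative, not pinned by this module.
if __name__ == "__main__":
    require_version("python>=3.8" )           # version range checked against the interpreter
    require_version("packaging" )             # bare package: only checks it is installed
    require_version_core("packaging>=20.0" )  # on failure, adds the transformers dev hint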
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ["""PoolFormerFeatureExtractor"""]
a__ : Any = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
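    # Hedged sketch of what the lazy pattern above buys: _LazyModule registers the
    # names from _import_structure but defers the heavy submodule imports until an
    # attribute is first touched, so `import transformers` stays cheap. The usage
    # below is an illustrative assumption, not part of this __init__.
    if __name__ == "__main__":
        import transformers

        config = transformers.PoolFormerConfig()  # first access triggers the real import
        print(type(config ).__module__ )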
| 309 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 172 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase ):
    """simple docstring"""
    def setUp( self ) -> None:
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , save_dir ) -> None:
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ) -> None:
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided( self ) -> None:
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ) -> None:
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ) -> None:
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True )
        mock_torch = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False )
        mock_torch = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 540 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 290 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as there
are in `titles` or `texts`.
titles (`str` or `List[str]`):
The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batches with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet), truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of lists of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a_ )
class _UpperCamelCase :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
__lowerCAmelCase = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
__lowerCAmelCase = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles as texts, but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
__lowerCAmelCase = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
__lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowerCAmelCase = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1_6 , __UpperCamelCase = 6_4 , __UpperCamelCase = 4 , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = reader_input["input_ids"]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reader_output[:3]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
__lowerCAmelCase = []
for doc_id in sorted_docs:
__lowerCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowerCAmelCase = sequence_ids.index(self.pad_token_id )
else:
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowerCAmelCase = sorted(__UpperCamelCase , key=lambda x : x[1] , reverse=__UpperCamelCase )
__lowerCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__lowerCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class _UpperCamelCase (a_ , a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = DPRReaderTokenizer
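# Illustrative end-to-end usage sketch for the reader tokenizer above, following the
# public DPR example from the transformers documentation. Treat it as a sketch: the
# checkpoint name and the printed field are assumptions, not verified against this
# exact (obfuscated) class.
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)  # best answer span for the first question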
| 290 | 1 |
import functools
from typing import Any
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : list[str] ) -> bool:
# Validation
if not isinstance(snake_case_ , snake_case_ ) or len(snake_case_ ) == 0:
raise ValueError('''the string should be not empty string''' )
if not isinstance(snake_case_ , snake_case_ ) or not all(
isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) > 0 for item in words ):
raise ValueError('''the words should be a list of non-empty strings''' )
# Build trie
__snake_case = {}
__snake_case = '''WORD_KEEPER'''
for word in words:
__snake_case = trie
for c in word:
if c not in trie_node:
__snake_case = {}
__snake_case = trie_node[c]
__snake_case = True
__snake_case = len(snake_case_ )
# Dynamic programming method
@functools.cache
def is_breakable(snake_case_ : int ) -> bool:
if index == len_string:
return True
__snake_case = trie
for i in range(snake_case_ , snake_case_ ):
__snake_case = trie_node.get(string[i] , snake_case_ )
if trie_node is None:
return False
if trie_node.get(snake_case_ , snake_case_ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
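# Illustrative calls for the word-break checker above. In this dump the function
# keeps the obfuscated name `lowerCamelCase__`; with a readable name such as
# `word_break` the calls would look identical in shape.
#
#     print(lowerCamelCase__("applepenapple", ["apple", "pen"]))                   # expected: True
#     print(lowerCamelCase__("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # expected: False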
| 592 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=snake_case_ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=snake_case_ , default=5 )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=snake_case_ , default=1 )
parser.add_argument('''--freeze''' , type=snake_case_ , default=snake_case_ )
parser.add_argument('''--learning_rate''' , type=snake_case_ , default=5e-4 )
parser.add_argument('''--seed''' , type=snake_case_ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=snake_case_ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=snake_case_ , default=10 )
parser.add_argument('''--weight_decay''' , type=snake_case_ , default=0.01 )
parser.add_argument('''--output_dir''' , type=snake_case_ , default='''./results''' )
return parser.parse_args()
snake_case_ = load('accuracy')
def lowerCamelCase__ ( snake_case_ : Union[str, Any] ) -> List[Any]:
__snake_case , __snake_case = eval_pred
__snake_case = np.argmax(snake_case_ , axis=1 )
return metric.compute(predictions=snake_case_ , references=snake_case_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Any , a__ : Any ):
"""simple docstring"""
super().__init__()
__snake_case = trainer
def a (self : List[Any] , a__ : Optional[Any] , a__ : int , a__ : Tuple , **a__ : Optional[Any] ):
"""simple docstring"""
if control.should_evaluate:
__snake_case = deepcopy(a__ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowerCamelCase__ ( ) -> Dict:
__snake_case = get_args()
set_seed(args.seed )
__snake_case = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
__snake_case = dataset.train_test_split(test_size=0.2 )
__snake_case = train_test['''test'''].train_test_split(test_size=0.5 )
__snake_case = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
__snake_case = AutoTokenizer.from_pretrained(args.model_ckpt )
__snake_case = tokenizer.eos_token
__snake_case = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__snake_case = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__snake_case = False
__snake_case = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(snake_case_ : Any ):
__snake_case = tokenizer(example['''src'''] , truncation=snake_case_ , max_length=1024 )
__snake_case = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__snake_case = train_test_validation.map(
snake_case_ , batched=snake_case_ , remove_columns=train_test_validation['''train'''].column_names , )
__snake_case = DataCollatorWithPadding(tokenizer=snake_case_ )
__snake_case = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
__snake_case = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=snake_case_ , data_collator=snake_case_ , compute_metrics=snake_case_ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(snake_case_ ) )
trainer.train()
if __name__ == "__main__":
main()
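# Illustrative invocation (hedged): the flag names match the argparse definitions
# above, but the script filename and the paths are placeholders, not values taken
# from this file.
#
#     python train_complexity_predictor.py \
#         --model_ckpt microsoft/unixcoder-base-nine \
#         --num_epochs 5 \
#         --batch_size 6 \
#         --freeze True \
#         --output_dir ./results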
| 592 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Optional[Any] = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowercase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
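# Illustrative note: thanks to the _LazyModule indirection above, a top-level import
# such as the one below stays cheap, and the heavy modeling code only loads on first
# attribute access (assuming torch is installed). The checkpoint name is an
# assumption for demonstration purposes.
#
#     from transformers import BigBirdPegasusForConditionalGeneration
#
#     model = BigBirdPegasusForConditionalGeneration.from_pretrained(
#         "google/bigbird-pegasus-large-arxiv"
#     )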
| 706 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _A , _A=False ):
'''simple docstring'''
snake_case_ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( _A , _A , _A=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = ""
else:
snake_case_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_A , _A )
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = dct.pop(_A )
snake_case_ = val
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case_ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _A , _A , _A=False ):
'''simple docstring'''
snake_case_ = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_A , )
snake_case_ = ViTHybridConfig(backbone_config=_A , image_size=384 , num_labels=1000 )
snake_case_ = False
# load original model from timm
snake_case_ = timm.create_model(_A , pretrained=_A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_A )
snake_case_ = create_rename_keys(_A , _A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , _A , _A )
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-1k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(_A ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ = ViTHybridModel(_A ).eval()
else:
snake_case_ = ViTHybridForImageClassification(_A ).eval()
model.load_state_dict(_A )
# create image processor
snake_case_ = create_transform(**resolve_data_config({} , model=_A ) )
snake_case_ = transform.transforms
snake_case_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
snake_case_ = ViTHybridImageProcessor(
do_resize=_A , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_A , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ = prepare_img()
snake_case_ = transform(_A ).unsqueeze(0 )
snake_case_ = processor(_A , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_A , _A )
# verify logits
with torch.no_grad():
snake_case_ = model(_A )
snake_case_ = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
snake_case_ = timm_model.forward_features(_A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_A , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_A , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
lowercase__ : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
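# Illustrative invocation (hedged): the flags mirror the argparse setup above; the
# script filename and the dump folder are placeholders.
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit_hybrid_converted \
#         --push_to_hub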
| 139 | 0 |
def __UpperCamelCase ( A ):
if not isinstance(A , A ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
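# Illustrative calls: the function above returns the aliquot sum (the sum of proper
# divisors), which equals the number itself exactly for perfect numbers. The
# obfuscated name `__UpperCamelCase` is kept as defined in this dump.
#
#     print(__UpperCamelCase(6))    # 1 + 2 + 3 = 6 (perfect)
#     print(__UpperCamelCase(28))   # 1 + 2 + 4 + 7 + 14 = 28 (perfect)
#     print(__UpperCamelCase(10))   # 1 + 2 + 5 = 8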
| 415 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( A ):
UpperCamelCase__ = args.pruning_method
UpperCamelCase__ = args.threshold
UpperCamelCase__ = args.model_name_or_path.rstrip('''/''' )
UpperCamelCase__ = args.target_model_path
print(f"Load fine-pruned model from {model_name_or_path}" )
UpperCamelCase__ = torch.load(os.path.join(A , '''pytorch_model.bin''' ) )
UpperCamelCase__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
UpperCamelCase__ = tensor
print(f"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
UpperCamelCase__ = tensor
print(f"Copied layer {name}" )
elif "bias" in name:
UpperCamelCase__ = tensor
print(f"Copied layer {name}" )
else:
if pruning_method == "magnitude":
UpperCamelCase__ = MagnitudeBinarizer.apply(inputs=A , threshold=A )
UpperCamelCase__ = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
UpperCamelCase__ = name[:-6]
UpperCamelCase__ = model[f"{prefix_}mask_scores"]
UpperCamelCase__ = TopKBinarizer.apply(A , A )
UpperCamelCase__ = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
UpperCamelCase__ = name[:-6]
UpperCamelCase__ = model[f"{prefix_}mask_scores"]
UpperCamelCase__ = ThresholdBinarizer.apply(A , A , A )
UpperCamelCase__ = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
UpperCamelCase__ = name[:-6]
UpperCamelCase__ = model[f"{prefix_}mask_scores"]
UpperCamelCase__ , UpperCamelCase__ = -0.1, 1.1
UpperCamelCase__ = torch.sigmoid(A )
UpperCamelCase__ = s * (r - l) + l
UpperCamelCase__ = s_bar.clamp(min=0.0 , max=1.0 )
UpperCamelCase__ = tensor * mask
print(f"Pruned layer {name}" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
UpperCamelCase__ = os.path.join(
os.path.dirname(A ) , f"bertarized_{os.path.basename(A )}" )
if not os.path.isdir(A ):
shutil.copytree(A , A )
print(f"\nCreated folder {target_model_path}" )
torch.save(A , os.path.join(A , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
__magic_name__ =parser.parse_args()
main(args)
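# Illustrative invocation (hedged): the flag names come from the parser above; the
# script filename and the checkpoint folder are placeholders for a previously
# fine-pruned model.
#
#     python bertarize.py \
#         --pruning_method sigmoied_threshold \
#         --threshold 0.1 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model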
| 415 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="shi-labs/oneformer_demo" ) ->Dict:
with open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) as f:
UpperCAmelCase__ = json.load(UpperCamelCase__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for key, info in class_info.items():
UpperCAmelCase__ = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(UpperCamelCase__ ) )
UpperCAmelCase__ = thing_ids
UpperCAmelCase__ = class_names
return metadata
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=30 , __lowercase=400 , __lowercase=None , __lowercase=True , __lowercase=True , __lowercase=[0.5, 0.5, 0.5] , __lowercase=[0.5, 0.5, 0.5] , __lowercase=10 , __lowercase=False , __lowercase=255 , __lowercase="shi-labs/oneformer_demo" , __lowercase="ade20k_panoptic.json" , __lowercase=10 , ):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = min_resolution
UpperCAmelCase__ = max_resolution
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean
UpperCAmelCase__ = image_std
UpperCAmelCase__ = class_info_file
UpperCAmelCase__ = prepare_metadata(__lowercase , __lowercase )
UpperCAmelCase__ = num_text
UpperCAmelCase__ = repo_path
# for the post_process_functions
UpperCAmelCase__ = 2
UpperCAmelCase__ = 10
UpperCAmelCase__ = 10
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = do_reduce_labels
UpperCAmelCase__ = ignore_index
def A__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , __lowercase , __lowercase=False ):
if not batched:
UpperCAmelCase__ = image_inputs[0]
if isinstance(__lowercase , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ = int(self.size["""shortest_edge"""] * h / w )
UpperCAmelCase__ = self.size["""shortest_edge"""]
elif w > h:
UpperCAmelCase__ = self.size["""shortest_edge"""]
UpperCAmelCase__ = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCAmelCase__ = self.size["""shortest_edge"""]
UpperCAmelCase__ = self.size["""shortest_edge"""]
else:
UpperCAmelCase__ = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ = max(__lowercase , key=lambda __lowercase : item[0] )[0]
UpperCAmelCase__ = max(__lowercase , key=lambda __lowercase : item[1] )[1]
return expected_height, expected_width
def A__ ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _UpperCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__lowercase : Optional[int] = image_processing_class
def A__ ( self ):
UpperCAmelCase__ = OneFormerImageProcessorTester(self )
@property
def A__ ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , """image_mean""" ) )
self.assertTrue(hasattr(__lowercase , """image_std""" ) )
self.assertTrue(hasattr(__lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowercase , """do_resize""" ) )
self.assertTrue(hasattr(__lowercase , """size""" ) )
self.assertTrue(hasattr(__lowercase , """ignore_index""" ) )
self.assertTrue(hasattr(__lowercase , """class_info_file""" ) )
self.assertTrue(hasattr(__lowercase , """num_text""" ) )
self.assertTrue(hasattr(__lowercase , """repo_path""" ) )
self.assertTrue(hasattr(__lowercase , """metadata""" ) )
self.assertTrue(hasattr(__lowercase , """do_reduce_labels""" ) )
def A__ ( self ):
pass
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processor
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ = self.image_processing_tester.get_expected_values(__lowercase , batched=__lowercase )
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , __lowercase=False , __lowercase=False , __lowercase="np" ):
UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase__ = self.image_processing_tester.num_labels
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowercase )
if with_segmentation_maps:
UpperCAmelCase__ = num_labels
if is_instance_map:
UpperCAmelCase__ = list(range(__lowercase ) ) * 2
UpperCAmelCase__ = dict(enumerate(__lowercase ) )
UpperCAmelCase__ = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase__ = [Image.fromarray(__lowercase ) for annotation in annotations]
UpperCAmelCase__ = image_processor(
__lowercase , ["""semantic"""] * len(__lowercase ) , __lowercase , return_tensors="""pt""" , instance_id_to_semantic_id=__lowercase , pad_and_return_pixel_mask=__lowercase , )
return inputs
def A__ ( self ):
pass
def A__ ( self ):
def common(__lowercase=False , __lowercase=None ):
UpperCAmelCase__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=__lowercase , is_instance_map=__lowercase , segmentation_type=__lowercase )
UpperCAmelCase__ = inputs["""mask_labels"""]
UpperCAmelCase__ = inputs["""class_labels"""]
UpperCAmelCase__ = inputs["""pixel_values"""]
UpperCAmelCase__ = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(__lowercase , __lowercase , __lowercase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__lowercase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=__lowercase )
common(is_instance_map=__lowercase , segmentation_type="""pil""" )
common(is_instance_map=__lowercase , segmentation_type="""pil""" )
def A__ ( self ):
UpperCAmelCase__ = np.zeros((20, 50) )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
UpperCAmelCase__ = binary_mask_to_rle(__lowercase )
self.assertEqual(len(__lowercase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = feature_extractor.post_process_semantic_segmentation(__lowercase )
self.assertEqual(len(__lowercase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase__ = feature_extractor.post_process_semantic_segmentation(__lowercase , target_sizes=__lowercase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = image_processor.post_process_instance_segmentation(__lowercase , threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowercase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ):
UpperCAmelCase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCAmelCase__ = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase__ = image_processor.post_process_panoptic_segmentation(__lowercase , threshold=0 )
self.assertTrue(len(__lowercase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowercase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 720 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase__ = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase__ = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
UpperCAmelCase__ = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowercase , __lowercase )
def A__ ( self , **__lowercase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def A__ ( self , **__lowercase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def A__ ( self ):
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
UpperCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ):
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def A__ ( self ):
UpperCAmelCase__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase__ = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
UpperCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def A__ ( self ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(__lowercase , return_tensors="""np""" )
UpperCAmelCase__ = processor(images=__lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = processor(text=__lowercase )
UpperCAmelCase__ = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(__lowercase ):
processor()
def A__ ( self ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ = processor.batch_decode(__lowercase )
UpperCAmelCase__ = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def A__ ( self ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 422 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :Optional[int] = DebertaTokenizer
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :int = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
lowerCamelCase__ : Any = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCamelCase__ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase__ : Union[str, Any] = {"""unk_token""": """[UNK]"""}
lowerCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def _snake_case (self , **__magic_name__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : Tuple = """lower newer"""
return input_text, output_text
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCamelCase__ : List[Any] = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase__ : Tuple = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : int = tokenizer("""Hello""" , """World""" )
lowerCamelCase__ : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , __magic_name__ )
@slow
def _snake_case (self ):
lowerCamelCase__ : List[str] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
lowerCamelCase__ : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
lowerCamelCase__ : str = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowerCamelCase__ : Any = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowerCamelCase__ : Any = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowerCamelCase__ : int = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
lowerCamelCase__ : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase__ : List[Any] = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
lowerCamelCase__ : Tuple = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
lowerCamelCase__ : List[Any] = tokenizer(__magic_name__ , padding=__magic_name__ )
lowerCamelCase__ : int = [tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) for seq in encoding["""input_ids"""]]
# fmt: off
lowerCamelCase__ : Union[str, Any] = {
"""input_ids""": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase__ : Union[str, Any] = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , __magic_name__ )
for expected, decoded in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(__magic_name__ , __magic_name__ )
| 157 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :Optional[int] = DebertaTokenizer
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :int = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
lowerCamelCase__ : Any = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCamelCase__ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase__ : Union[str, Any] = {"""unk_token""": """[UNK]"""}
lowerCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def _snake_case (self , **__magic_name__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : Tuple = """lower newer"""
return input_text, output_text
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCamelCase__ : List[Any] = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
lowerCamelCase__ : Tuple = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : int = tokenizer("""Hello""" , """World""" )
lowerCamelCase__ : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , __magic_name__ )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self ):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
            sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["""input_ids"""]]
# fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 157 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
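# Sketch of how an entry point file like this is typically consumed through
# torch.hub (the repo slug below is illustrative, not taken from this file):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")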
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCamelCase_ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 599 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig ):
    model_type = """funnel"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
    }
    def __init__(
        self,
        vocab_size=3_05_22,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=7_68,
        n_head=12,
        d_head=64,
        d_inner=30_72,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1E-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs )
    @property
    def num_hidden_layers(self ):
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers(self , value ):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )

    @property
    def num_blocks(self ):
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks(self , value ):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 595 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """openbmb/cpm-ant-10b""": 1_0_2_4,
}
def load_vocab(vocab_file ):
    """Load a vocabulary file into an ordered token -> id dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    def __init__(self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end

        return sub_tokens
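# Greedy longest-match-first behaviour of WordpieceTokenizer.tokenize, shown
# with an illustrative (non-CPM-Ant) vocab: with vocab {"ab", "c"} the input
# "abc" splits into ["ab", "c"], while "abd" (no "d" entry) falls back to
# ["ab", "<unk>"].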
class CpmAntTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id(self ):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self ):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self ):
        return self.encoder["\n"]

    @property
    def vocab_size(self ):
        return len(self.encoder )

    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def _tokenize(self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens

    def _decode(self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )

    def check(self , token ):
        return token in self.encoder

    def convert_tokens_to_string(self , tokens ):
        return "".join(tokens )

    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self , index ):
        return self.decoder.get(index , self.unk_token )

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
return [1] + ([0] * len(lowercase ))
| 595 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self ):
        self.model_tester = FlaxBertModelTester(self )

    @slow
    def test_model_from_pretrained(self ):
        # Only check this for the base model; not necessary for all model classes.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 721 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
    '''Convert a TensorFlow GPTSAN checkpoint into a PyTorch state dict.'''
    parameter_file = os.path.join(args.tf_model_dir , "parameters.json" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith(".pt" ):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_a = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_a = 8
_a = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/moe" ):
_a = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_a = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/softmlp/kernel" ):
_a = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_a = key_name[-9:-7]
for i in range(16 ):
_a = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_a = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/mlp" ):
_a = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_a = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/p1/bias" ):
_a = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/p2/kernel" ):
_a = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/p2/bias" ):
_a = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/ln" ):
_a = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_a = "model.blocks.%d.feed_forward.norm.bias" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/g" ):
_a = "model.blocks.%d.feed_forward.norm.weight" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/att" ):
_a = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_a = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_a = state[:, 0, :, :]
_a = state[:, 1, :, :]
_a = state[:, 2, :, :]
_a = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_a = torch.tensor(UpperCamelCase_ )
_a = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_a = torch.tensor(UpperCamelCase_ )
_a = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/o/kernel" ):
_a = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_a = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/an" ):
_a = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_a = "model.blocks.%d.self_attn.norm.bias" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif key_name.endswith("/g" ):
_a = "model.blocks.%d.self_attn.norm.weight" % player
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_a = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_a = "model.%s.weight" % nlayer
_a = vnp.copy() # same in embedded
_a = torch.tensor(UpperCamelCase_ )
if key_name.startswith("model/wte" ):
_a = "lm_head.weight"
_a = vnp.copy() # same in embedded
_a = torch.tensor(UpperCamelCase_ )
elif key_name.startswith("model/wob" ):
_a = "final_logits_bias"
_a = vnp.copy() # same in embedded
_a = state.reshape((1, -1) )
_a = torch.tensor(UpperCamelCase_ )
elif key_name == "model/dense/kernel":
_a = "model.last_project.weight"
_a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a = torch.tensor(UpperCamelCase_ )
elif key_name == "model/dense_1/bias":
_a = "model.last_project.bias"
_a = vnp.copy() # same because it is one dimensional
_a = torch.tensor(UpperCamelCase_ )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
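# Example invocation (the paths and script filename below are placeholders):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt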
| 612 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs )
        return config

    def test_timesteps(self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type(self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample(self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding(self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices(self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )

    def test_variance(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def test_batch_step_no_noise(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )

        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )

        assert abs(result_sum.item() - 1153.1833 ) < 1E-2
        assert abs(result_mean.item() - 0.5005 ) < 1E-3

    def test_full_loop_no_noise(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def test_full_loop_with_v_prediction(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def test_custom_timesteps(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )

        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()

            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError ,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
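# Illustrative stand-alone use of the scheduler under test (a sketch, not part
# of the suite; a real pipeline would replace the zero tensor with a trained
# UNet's noise prediction):
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample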
| 18 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase ):
@require_torch
    def test_small_model_pt(self ):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
    def test_small_model_tf(self ):
pass
@slow
@require_torch
    def test_large_model_pt(self ):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ] , )

        output = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
@unittest.skip('No models are available in TF' )
    def test_large_model_tf(self ):
pass
| 549 | 0 |
"""simple docstring"""
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start , visited , sort ):
    '''Perform a depth-first search based topological sort on the global graph.'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
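# For contrast, a minimal iterative sketch of topological ordering via Kahn's
# algorithm (BFS on in-degrees). This helper is an illustration added here and
# is not part of the original module; it reuses the `edges`/`vertices` globals
# above. Note it yields sources first, whereas the recursive version appends a
# vertex only after its neighbors.
def kahn_topological_sort(vertices: list[str] , edges: dict[str, list[str]] ) -> list[str]:
    # count incoming edges for every vertex
    in_degree = {v: 0 for v in vertices}
    for neighbors in edges.values():
        for neighbor in neighbors:
            in_degree[neighbor] += 1
    # start from vertices with no incoming edges
    order: list[str] = []
    ready = [v for v in vertices if in_degree[v] == 0]
    while ready:
        current = ready.pop(0 )
        order.append(current )
        for neighbor in edges[current]:
            in_degree[neighbor] -= 1
            if in_degree[neighbor] == 0:
                ready.append(neighbor )
    return order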
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 529 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename , start_prompt , end_prompt ):
    '''Find the block of text between `start_prompt` and `end_prompt` in `filename`.'''
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier ):
    '''Split a CamelCased name into its component words.'''
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , identifier )
    return [m.group(0 ) for m in matches]
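# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]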
def _center_text(text , width ):
    '''Center `text` in a table cell of `width` characters.'''
    text_length = 2 if text == """✅""" or text == """❌""" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    '''Build the markdown table of model support from the auto modules.'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer""" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast""" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]

        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )

    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2

    # Build the table per se
    table = """|""" + """|""".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"

    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table(overwrite=False ):
    '''Check the model table in `index.md` is up to date and optionally rewrite it.'''
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 529 | 1 |
def factorial(digit: int ) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))


def krishnamurthy(number: int ) -> bool:
    '''Check whether `number` equals the sum of the factorials of its digits.'''
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
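# Worked example: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is a
# Krishnamurthy number (as are 1, 2 and 40585).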
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
    )
| 67 |
"""simple docstring"""
def create_ngram(sentence: str , ngram_size: int ) ->list[str]:
    """Return every character n-gram of length `ngram_size` in `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
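# e.g. create_ngram("I am", 2) -> ["I ", " a", "am"]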
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''A binary-search-tree node holding a float and optional children.'''

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(tree: TreeNode | None ) -> bool:
    # Validation
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True

        if not isinstance(node , TreeNode ):
            return False

        try:
            float(node.data )
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(tree ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )

    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(tree , -float('inf' ) , float('inf' ) )
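# Minimal usage sketch (hypothetical tree, not from the original module):
#   valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   is_binary_search_tree(valid)                          # True
#   is_binary_search_tree(TreeNode(2.0, TreeNode(3.0)))   # False: left child > root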
if __name__ == "__main__":
import doctest
doctest.testmod()
| 565 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta_xl'''] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
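# With the lazy structure above, `import transformers.models.xlm_roberta_xl`
# stays cheap: the torch-backed classes listed in `_import_structure` are only
# materialized on first attribute access.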
| 565 | 1 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
def hashimage(image: Image ) -> str:
    """Hash an image's raw bytes down to a short stable fingerprint."""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image ) -> Dict:
    """Summarize a mask as a (hash, shape) pair for compact assertions."""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self ):
pass
@slow
@require_torch
    def test_small_model_pt(self ):
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_5_6 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8871}
],)
# fmt: on
@require_torch
@slow
    def test_threshold(self ):
_lowerCamelCase : Union[str, Any] = "facebook/sam-vit-huge"
_lowerCamelCase : Tuple = pipeline("mask-generation",model=__A )
_lowerCamelCase : Union[str, Any] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg",pred_iou_thresh=1,points_per_batch=2_5_6 )
# Shortening by hashing
_lowerCamelCase : List[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
            ],)
| 44 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
A__ : str = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    '''A binary-tree node holding integer data and optional left/right children.'''

    def __init__(self , data: int ) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("""\n********Press N to stop entering at any point of time********\n""")
    check = input("""Enter the value of the root node: """).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ))
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ))
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ))
        node_found.right = right_node
        q.put(right_node )
    raise RuntimeError("""Unreachable: the tree is always returned from inside the loop""" )
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
print(node.data , end=""",""")
pre_order(node.left)
pre_order(node.right)
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
in_order(node.left)
print(node.data , end=""",""")
in_order(node.right)
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
post_order(node.left)
post_order(node.right)
print(node.data , end=""",""")
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
__snake_case: Optional[Any] = queue.Queue()
q.put(SCREAMING_SNAKE_CASE__)
while not q.empty():
__snake_case: str = q.get()
print(node_dequeued.data , end=""",""")
if node_dequeued.left:
q.put(node_dequeued.left)
if node_dequeued.right:
q.put(node_dequeued.right)
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
__snake_case: int = queue.Queue()
q.put(SCREAMING_SNAKE_CASE__)
while not q.empty():
__snake_case: List[str] = []
while not q.empty():
__snake_case: Optional[Any] = q.get()
print(node_dequeued.data , end=""",""")
if node_dequeued.left:
list_.append(node_dequeued.left)
if node_dequeued.right:
list_.append(node_dequeued.right)
print()
for node in list_:
q.put(SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
__snake_case: Union[str, Any] = []
__snake_case: List[str] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""")
stack.append(SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = n.left
# end of while means current node doesn't have left child
__snake_case: Tuple = stack.pop()
# start to traverse its right child
__snake_case: str = n.right
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
__snake_case: str = []
__snake_case: Dict = node
while n or stack:
while n:
stack.append(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = n.left
__snake_case: List[str] = stack.pop()
print(n.data , end=""",""")
__snake_case: Dict = n.right
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) or not node:
return
__snake_case , __snake_case: Union[str, Any] = [], []
__snake_case: List[Any] = node
stacka.append(SCREAMING_SNAKE_CASE__)
while stacka: # to find the reversed order of post order, store it in stack2
__snake_case: Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left)
if n.right:
stacka.append(n.right)
stacka.append(SCREAMING_SNAKE_CASE__)
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""")
def A__ ( SCREAMING_SNAKE_CASE__ = "" , SCREAMING_SNAKE_CASE__=50 , SCREAMING_SNAKE_CASE__="*") -> str:
if not s:
return "\n" + width * char
__snake_case , __snake_case: Union[str, Any] = divmod(width - len(SCREAMING_SNAKE_CASE__) - 2 , 2)
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
__UpperCAmelCase : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
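

# Hedged usage sketch: a fixed tree built programmatically, so the traversals above can
# be exercised without the interactive prompts of `build_tree` (illustrative only; the
# `demo_tree` name is not part of the original module).
def demo_tree() -> TreeNode:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    return root  # pre_order(demo_tree()) prints 1,2,4,5,3,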
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the processor / image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 155 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 320 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
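

# Hedged usage sketch (illustrative only; in practice `RoFormerConfig` is imported
# from transformers rather than run from this module):
if __name__ == "__main__":
    config = RoFormerConfig()  # all defaults as defined above
    print(config.model_type, config.vocab_size, config.max_position_embeddings)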
| 320 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
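

# Hedged usage sketch of the stochastic sampling loop this scheduler implements.
# Illustrative only: the relative imports above mean this module is normally imported
# via diffusers, and `model_output` here is a zero stand-in for a real denoiser call.
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=4)

    key = random.PRNGKey(0)
    sample = jnp.ones((1, 3, 8, 8)) * state.schedule[0]

    for t in range(state.num_inference_steps):
        sigma = float(state.schedule[t])
        sigma_prev = float(state.schedule[t + 1]) if t + 1 < state.num_inference_steps else 0.0
        key, step_key = random.split(key)
        sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, step_key)
        model_output = jnp.zeros_like(sample_hat)  # stand-in for a real denoiser call
        sample, _, state = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False)

    print(sample.shape)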
| 586 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 586 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
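

# Note: the `__main__` guard below matters here. Platforms that spawn rather than
# fork worker processes (e.g. Windows) re-import this module in every child, so the
# demo must not run at import time.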
if __name__ == "__main__":
main()
| 338 | """simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
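

# Hedged usage sketch (illustrative only; in practice `Wav2Vec2Processor` is imported
# from transformers, and the "facebook/wav2vec2-base-960h" checkpoint is assumed reachable):
if __name__ == "__main__":
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    speech = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in
    inputs = processor(audio=speech, sampling_rate=16000, return_tensors="np")
    print(inputs.input_values.shape)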
| 338 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 712 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
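

# Hedged usage sketch (illustrative only; in practice `PhobertTokenizer` is imported
# from transformers, and network access to the "vinai/phobert-base" checkpoint is assumed):
if __name__ == "__main__":
    tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    print(tokenizer.tokenize("Tôi là sinh_viên trường đại_học Công_nghệ ."))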
| 168 | 0 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 7 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 701 |
from __future__ import annotations

import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined sensitivity constant, usually in [0.04, 0.06]
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with corners identified and the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the sensitivity factor passed to the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 651 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
CHECKPOINT = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self) -> None:
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 633 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
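

# Illustrative usage of the processor above (a sketch, not part of the class; the
# checkpoint name "Salesforce/blip-image-captioning-base" and the local file name
# are assumptions, and running it needs network access plus Pillow):
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   image = Image.open("photo.jpg")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # `inputs` now carries pixel_values plus the tokenized text fields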
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Tuple = True
if hasattr(UpperCamelCase__ , """use_cache""" ):
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Tuple = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = len(model(UpperCamelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """saved_model""" , """1""" )
lowerCamelCase__ : List[Any] = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase__ : Any = model(UpperCamelCase__ )
if self.is_encoder_decoder:
lowerCamelCase__ : Dict = outputs["""encoder_hidden_states"""]
lowerCamelCase__ : Any = outputs["""encoder_attentions"""]
else:
lowerCamelCase__ : int = outputs["""hidden_states"""]
lowerCamelCase__ : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase__ : Optional[int] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = getattr(self.model_tester , """key_length""" , UpperCamelCase__ )
def check_decoder_attentions_output(UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
self.assertEqual(out_len % 2 , 0 )
lowerCamelCase__ : Any = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase__: List[str] ):
lowerCamelCase__ : Any = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
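
# The expected slice above pins ConvBERT's deterministic forward pass; a minimal
# way to reproduce it outside the test suite (sketch; needs TensorFlow and
# network access):
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   print(output.shape)  # (1, 6, 768)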
| 631 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
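

# The line above defers every heavy import until first attribute access. A
# minimal sketch of that idea (illustrative only, not the transformers
# _LazyModule implementation):
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule lazily, then pull the attribute from it
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")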
| 75 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
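

# What the property above yields in practice (a sketch; the direct construction
# of the ONNX config from a default XmodConfig is shown for illustration):
#
#   onnx_config = XmodOnnxConfig(XmodConfig(), task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])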
| 474 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as the sum
    of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that violates the conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 709 |
import operator as op
snake_case_ : Optional[Any] ='''scaler.pt'''
snake_case_ : Any ='''pytorch_model'''
snake_case_ : Optional[Any] ='''random_states'''
snake_case_ : Tuple ='''optimizer'''
snake_case_ : str ='''scheduler'''
snake_case_ : str ='''pytorch_model.bin'''
snake_case_ : Optional[Any] ='''pytorch_model.bin.index.json'''
snake_case_ : str ='''model.safetensors'''
snake_case_ : Optional[int] ='''model.safetensors.index.json'''
snake_case_ : Dict ='''1.10.2'''
snake_case_ : List[str] ='''py38'''
snake_case_ : List[str] ='''4.17.0'''
snake_case_ : Optional[int] =['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
snake_case_ : Optional[int] =['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
snake_case_ : Any =['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
snake_case_ : Optional[int] =['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
snake_case_ : Optional[Any] =['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
snake_case_ : str ='''2.0.1'''
snake_case_ : Tuple =['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
snake_case_ : Union[str, Any] =['''default''', '''reduce-overhead''', '''max-autotune''']
snake_case_ : Optional[int] ={'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
snake_case_ : Dict =[
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
snake_case_ : Tuple =['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
snake_case_ : Union[str, Any] =['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 205 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(F"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F"=> removing {file_name}")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
__UpperCAmelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
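
# Example invocation (illustrative; the tokenizer name must be a key of
# SLOW_TO_FAST_CONVERTERS and the checkpoint a hub id known to that tokenizer):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers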
| 308 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False, ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
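

if __name__ == "__main__":
    # Illustrative round trip (a sketch): Caesar-shift a pangram by 3, then let
    # the chi-squared scorer recover the shift and plaintext. With this much
    # English text the minimum-chi-squared shift is expected to be 3.
    alphabet = [chr(i) for i in range(97, 123)]
    plain = "the quick brown fox jumps over the lazy dog"
    encrypted = "".join(
        alphabet[(alphabet.index(c) + 3) % 26] if c in alphabet else c for c in plain
    )
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(encrypted)
    print(shift, decoded)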
| 308 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
@slow
    def test_inference(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
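
# Minimal inference sketch mirroring the integration test above (the waveform
# array and its 16 kHz sampling rate are assumptions about the caller's audio):
#
#   extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   predicted_id = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_id])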
| 711 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {"tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : Tuple = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
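

# Illustrative use of the fast tokenizer above (a sketch; requires network
# access to fetch the tokenizer.json referenced in the map at the top of the
# file):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world").input_ids
#   print(tokenizer.decode(ids))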
| 364 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
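
# Quick illustration of the left/right variants above (worked by hand):
#   bisect_left([1, 2, 2, 3], 2)  -> 1   (first index where 2 could be inserted)
#   bisect_right([1, 2, 2, 3], 2) -> 3   (index just past the last equal element)
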
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""") | 271 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 271 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
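
# Sketch of typical use of the exported pipeline (the checkpoint name
# "kakaobrain/karlo-v1-alpha" is an assumption, and the download is large):
#
#   import torch
#   from diffusers import UnCLIPPipeline
#
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
#   image = pipe("a photo of an astronaut riding a horse").images[0]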
| 472 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_longformer_fast'''] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longformer'''] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_longformer'''] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
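
# The exported models combine windowed local attention with user-chosen global
# attention. Minimal usage sketch (model id "allenai/longformer-base-4096" is
# the canonical checkpoint; network access required):
#
#   import torch
#   from transformers import LongformerModel, LongformerTokenizerFast
#
#   tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
#   model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
#   inputs = tokenizer("Long document text...", return_tensors="pt")
#   global_attention_mask = torch.zeros_like(inputs.input_ids)
#   global_attention_mask[:, 0] = 1  # give the <s> token global attention
#   outputs = model(**inputs, global_attention_mask=global_attention_mask)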
| 472 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    """Approximate the arc length of fnc between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear segments and sums their lengths
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
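
# Sanity check for the approximation above (worked example): the straight line
# f(x) = x from 0 to 1 has exact length sqrt(2) ~ 1.41421, and because every
# chord lies on the curve, line_length(lambda x: x, 0, 1) reproduces it for any
# step count.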
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
    while i <= 100000:
print(f'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''')
i *= 1_0
| 496 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE : List[str] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> str:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, 'models/bert/' ) )
SCREAMING_SNAKE_CASE_ = self.transformer_dir
shutil.copy(
os.path.join(_lowercase, 'src/transformers/models/bert/modeling_bert.py' ), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py' ), )
def a__ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = 'src/transformers'
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119 )
        code = black.format_str(code, mode=mode )
        fname = os.path.join(self.transformer_dir, 'new_code.py' )
        with open(fname, 'w', newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True )
            with open(fname, 'r' ) as f:
                self.assertTrue(f.read(), expected )
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        self.assertEqual(code, REFERENCE_CODE )
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE ), )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub('Bert', long_class_name, REFERENCE_CODE ), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE ), )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list, converted_md_list_sample )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme['format_model_list'] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample )
| 294 | 0 |
lowerCAmelCase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowerCAmelCase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowerCAmelCase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowerCAmelCase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowerCAmelCase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowerCAmelCase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 721 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": simple_accuracy(preds, p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file, '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(result )
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 628 | 0 |
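
# Example invocation (a sketch - the task name and paths are placeholders for your setup):
# python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#     --data_dir ./swag --output_dir ./mc_out --do_train --do_eval --max_seq_length 128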
'''Tokenization classes for CANINE.'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0Xe000
SEP = 0Xe001
BOS = 0Xe002
MASK = 0Xe003
RESERVED = 0Xe004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""Construct a CANINE tokenizer, i.e. a character splitter over Unicode codepoints."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text )

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) into an id (i.e. its integer codepoint value)."""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a Unicode codepoint (integer) into a token (str); special codepoints get a human-readable name."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens )
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )

        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
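
# A minimal usage sketch (assumes the class above; CANINE ids are raw Unicode codepoints):
# tok = CanineTokenizer()
# tok("hi")["input_ids"]  # -> [57344, 104, 105, 57345], i.e. [CLS], 'h', 'i', [SEP]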
| 199 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
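
# A minimal usage sketch (model id and `image` are placeholders; assumes the image
# processor was initialized with apply_ocr=True):
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(image, return_tensors="pt")  # OCR -> words/boxes -> tokenized batch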
| 184 | 0 |
'''Similarity search: for each query vector, find the nearest vector in a dataset.'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each vector in value_array, return the closest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )

    answer = []

    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist] )

    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
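
# Worked examples (hand-checked against the functions above):
# dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
# value_array = np.array([[0, 0, 1]])
# similarity_search(dataset, value_array)  # -> [[[0, 0, 0], 1.0]]
# cosine_similarity(np.array([1, 2]), np.array([6, 32]))  # -> ~0.9615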
if __name__ == "__main__":
import doctest
doctest.testmod()
| 465 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
A_ = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename , "a" )

    def __getattr__(self, attr):
        return getattr(self.stdout , attr )

    def write(self, msg):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R"^.*\r" , "" , msg , 0 , re.M ) )
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to max_width chars with shell line continuations."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'''{key}={val}''' )

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/" )[-1]
    cmd.append(python )

    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd ) > 0:
        current_line += f'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ""
    return "\\\n".join(lines )
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+" , " " , args.base_cmd )

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+" , "" , args.base_cmd )
    args.base_cmd += f''' --output_dir {output_dir}'''

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+" , "" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
    prefix = variation.replace(" " , "-" )
    with open(Path(output_dir ) / f'''log.{prefix}.stdout.txt''' , "w" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
        metrics = json.load(f )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ):
    results = []
    metrics = []
    preamble = f'''{id}: {variation:<{longest_variation_len}}'''
    outcome = f'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
        outcome = f'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results )
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="columns" , )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="columns" )  # reorder cols

    # capitalize
    df = df.rename(str.capitalize , axis="columns" )

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_" , "<br>" ) , axis="columns" )
    df_console = df.rename(lambda c: c.replace("_" , "\n" ) , axis="columns" )

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=".2f" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=".2f" )]

    print("\n\n".join(report ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd" , default=None , type=str , required=True , help="Base cmd" , )
    parser.add_argument(
        "--variations" , default=None , type=str , nargs="+" , required=True , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
    parser.add_argument(
        "--base-variation" , default=None , type=str , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
    parser.add_argument(
        "--target-metric-key" , default=None , type=str , required=True , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
    parser.add_argument(
        "--repeat-times" , default=1 , type=int , help="How many times to re-run each variation - an average will be reported" , )
    parser.add_argument(
        "--output_dir" , default="output_benchmark" , type=str , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
    parser.add_argument(
        "--verbose" , default=False , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )

    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(" ".join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script's output is also piped into {report_fn}''' )

    sys.stdout = Tee(report_fn )

    print(f'''\n*** Running {len(variations )} benchmarks:''' )
    print(f'''Base command: {" ".join(base_cmd )}''' )

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations , desc="Total completion: " , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )

    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 465 | 1 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def partition(number_to_partition: int ) -> set[int]:
    """Return the set of prime products, one per way to write `number_to_partition` as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )

    return ret
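
# Hand-checked example: partition(7) == {7, 10, 12}, one product per way to write
# 7 as a sum of primes (7, 5 + 2, 3 + 2 + 2).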
def solution(number_unique_partitions: int = 50_00 ) -> int | None:
    """Find the first value expressible as a sum of primes in over `number_unique_partitions` unique ways."""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
A__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A__ = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = '''roberta'''
elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = '''transformer'''
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
A__ = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
A__ = f"""{prefix}.embeddings.{w}.weight"""
A__ = state_dict[param_name]
for w in ["weight", "bias"]:
A__ = f"""{prefix}.embeddings.LayerNorm.{w}"""
A__ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
A__ = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
A__ = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
A__ = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
A__ = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
A__ = state_dict[f"""lm_head.dense.{w}"""]
A__ = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
A__ = state_dict[f"""{prefix}.ln_f.{w}"""]
A__ = state_dict['''lm_head.weight''']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
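
    # To reuse the extracted weights later (a sketch - `student_config` is a hypothetical
    # config whose depth matches the copied layers):
    # student = RobertaForMaskedLM(student_config)
    # student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)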
| 252 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring common to `text1` and `text2`, via dynamic programming."""
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError("longest_common_substring() takes two strings for inputs" )

    text1_length = len(text1 )
    text2_length = len(text2 )

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0

    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
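
# Example (hand-checked): longest_common_substring("abcdef", "xabded") returns "ab".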
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 182 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 182 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F'Loading a tokenizer inside {cls.__name__} from a config that does not'
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )

            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__(self, *args, **kwargs):
        """Forwards audio inputs to the feature extractor and text inputs to the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad(self, *args, **kwargs):
        """Pads audio features via the feature extractor and label encodings via the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )

        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 96 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(tokenizer )
            self.assertIsInstance(tokenizer , (GPT2Tokenizer, GPT2TokenizerFast) )
            self.assertGreater(len(tokenizer ) , 0 )
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(tmp_dir , """vocab.txt""" ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""bert""" , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(tmp_dir , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(tmp_dir , """merges.txt""" ) )

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="""gpt2""" , use_fast=False )
            self.assertIsInstance(tokenizer , GPT2Tokenizer )
@require_tokenizers
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__magic_name__ , """vocab.txt""" ) )
a = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""bert""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__magic_name__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__magic_name__ , """merges.txt""" ) )
a = AutoTokenizer.from_pretrained(__magic_name__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
with pytest.raises(__magic_name__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_mapping_tokenizer_classes_resolvable_from_name(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not trusted, loading this dynamic tokenizer must fail.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
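# Standalone sketch of the registration pattern exercised above (CustomConfig and
# CustomTokenizer are the test_module fixtures; any config/tokenizer pair works
# the same way):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     tokenizer = AutoTokenizer.from_pretrained(path_to_saved_custom_checkpoint)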
| 468 | 0 |
import os
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt,
    using a bottom-up dynamic-programming pass over the rows."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
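# Sanity check of the same recurrence on a small in-memory triangle
# (illustrative only; `solution()` itself reads triangle.txt):
#
#     rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
#     for i in range(1, len(rows)):
#         for j in range(len(rows[i])):
#             left = rows[i - 1][j - 1] if j > 0 else 0
#             right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
#             rows[i][j] += max(left, right)
#     assert max(rows[-1]) == 23  # path 3 -> 7 -> 4 -> 9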
| 703 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big endian to little endian."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to its little-endian hexadecimal representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32 bits of `i` left by `shift` positions."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hexadecimal MD5 digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-bit words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
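# Quick self-check against the standard library (illustrative; `md5_me` returns
# the hex digest as bytes):
#
#     import hashlib
#     assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")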
| 346 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "crop_pct"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
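# Note on the crop settings exercised above: with crop_pct=0.9 and a 30-pixel
# target, the shortest edge is (roughly) resized to int(30 / 0.9) ≈ 33 before the
# 30x30 center crop is taken -- the ViT-style "crop percentage" convention.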
| 64 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class OnnxRuntimeRequiredPlaceholderTest:
    pass
| 30 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
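# Note on shapes (based on the calls above): with bootstrap_aggregation=True (the
# default) calculate_rouge returns a plain dict keyed by rouge type, while
# bootstrap_aggregation=False yields a defaultdict of per-example scores that can
# be loaded into a pandas DataFrame, as test_disaggregated_scores_are_deterministic does.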
| 30 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node holding a float payload."""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    """Return True if the tree rooted at `node` satisfies the BST property."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
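# Example usage (illustrative):
#
#     root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
#     assert is_binary_search_tree(root)
#
#     bad = TreeNode(2.0, left=TreeNode(5.0))  # left child larger than parent
#     assert not is_binary_search_tree(bad)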
| 14 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires a header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the alpha channel is dropped, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
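# Sanity arithmetic for `expected_hidden_dim` used above: each flattened patch
# stores patch_height * patch_width * num_channels pixel values plus 2 row/column
# position entries, e.g. 16 * 16 * 3 + 2 = 770 with the tester's default settings.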
| 181 | 0 |
"""A FIFO queue implemented on top of two LIFO stacks."""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """FIFO queue with amortized O(1) put/get built from two stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Lazily move items so the oldest element ends up on top of _stack2.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
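# Example usage (illustrative):
#
#     queue = QueueByTwoStacks([1, 2, 3])
#     queue.put(4)
#     assert queue.get() == 1   # FIFO order
#     assert len(queue) == 3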
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 706 |
"""Surface area and volume of a regular dodecahedron."""


def dodecahedron_surface_area(edge: int) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
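# Example values (illustrative; edge must be a positive int per the checks above):
#
#     dodecahedron_surface_area(2)  # ~82.583, i.e. 3 * sqrt(25 + 10*sqrt(5)) * 4
#     dodecahedron_volume(2)        # ~61.305, i.e. (15 + 7*sqrt(5)) / 4 * 8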
| 223 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate turnaround times using Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks whether each process has finished: 1 once it has run, 0 while waiting.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Saves the current highest response ratio.
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the response ratio of the candidate process.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate waiting times: waiting = turnaround - burst."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
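# Worked example of the selection rule above: a waiting process with burst B and
# wait W = current_time - arrival has response ratio (W + B) / B. A job that
# arrived at t=1 with B=2 therefore has ratio (4 - 1 + 2) / 2 = 2.5 at t=4 and
# outranks a ratio-1.0 job that has just arrived.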
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 88 |
"""Whisper model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    """Configuration class for a Whisper model (see the library documentation for full details)."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
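# Example (illustrative): instantiating with no arguments uses the defaults above.
#
#     config = WhisperConfig()
#     assert config.d_model == 256 and config.encoder_layers == 6
#     assert config.attribute_map["hidden_size"] == "d_model"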
| 460 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
def UpperCamelCase__ ( self ):
snake_case_ = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE )
snake_case_ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
snake_case_ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase__ ( self ):
snake_case_ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
snake_case_ = {'''input_ids''': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
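# Direct invocation, equivalent to the CLI entry point below (paths are placeholders):
#
#   convert_trax_checkpoint_to_pytorch(
#       trax_model_pkl_path="/path/to/trax_weights.pkl",
#       config_file="/path/to/reformer_config.json",
#       pytorch_dump_path="/path/to/pytorch_model.bin",
#   )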
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort with an early exit when a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
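# Example behaviour (worst case O(n^2) comparisons; the `swapped` early exit makes an
# already-sorted input a single O(n) pass):
#
#   >>> bubble_sort([5, 1, 4, 2, 8])
#   [1, 2, 4, 5, 8]
#   >>> bubble_sort([])
#   []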
def merge_sort(collection: list) -> list:
    """Min-max extraction sort: repeatedly pull the smallest and largest remaining
    items to the two ends until the middle is exhausted."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
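# Example -- every pass moves the current minimum to `start` and the current maximum to
# `end`, shrinking `collection` by two, so the overall cost is O(n^2):
#
#   >>> merge_sort([5, 1, 4, 2, 8])
#   [1, 2, 4, 5, 8]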
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
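# Optional post-conversion sanity check (illustrative; paths are placeholders):
#
#   config = BertConfig.from_json_file("/path/to/bert_config.json")
#   model = BertModel(config)
#   state_dict = torch.load("/path/to/pytorch_model.bin")
#   model.load_state_dict(state_dict)  # raises if any tensor name or shape mismatches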
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
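# With the _LazyModule pattern above, importing this package is cheap: submodules such
# as `modeling_vivit` (and therefore torch) are only imported on first attribute
# access. Illustrative sketch:
#
#   import transformers.models.vivit as vivit  # fast, nothing heavy loaded yet
#   model_cls = vivit.VivitModel               # triggers the real modeling import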
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
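# Behaviour sketch: these placeholders fail fast with an informative ImportError when
# the required backends are missing (illustrative):
#
#   try:
#       OnnxRuntimeModel()  # without torch/transformers/onnx installed
#   except ImportError as err:
#       print(err)  # names the backends that must be installed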
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_A = {"input_ids": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=_A,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
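# Usage sketch of the helpers exercised above (illustrative, outside the test harness):
#
#   sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
#   shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]  # one shard per process
#   resumed = skip_first_batches(DataLoader(range(24), batch_size=4), num_batches=2)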
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma):
    """Build a real-valued Gabor kernel of size `ksize` x `ksize`."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )

    return gabor
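# The kernel above implements the real-valued Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# with rotated coordinates x' = x*cos(theta) + y*sin(theta) and
# y' = -x*sin(theta) + y*cos(theta).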
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the elements at index1 and index2 and swap them if they violate
    `direction` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements of `array` starting at `low`; `length` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
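    # Note: bitonic sort only guarantees a sorted result when the number of elements is
    # a power of two (every level of the network halves the range). Example:
    #
    #   data = [12, 42, -21, 1]            # length 4 == 2**2
    #   bitonic_sort(data, 0, len(data), 1)
    #   assert data == [-21, 1, 12, 42]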
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
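# Usage sketch (illustrative; `repo_info` would come from e.g.
# `huggingface_hub.HfApi().dataset_info(repo_id)`):
#
#   fs = HfFileSystem(repo_info=repo_info, token=token)
#   fs.ls("")                              # top-level files/directories of the repo
#   with fs.open("data/train.csv", "rb") as f:
#       head = f.read(1024)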
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCamelCase = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
__lowerCamelCase = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
__lowerCamelCase = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
__lowerCamelCase = ""
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    # `example_yaml_structure` is the section structure parsed from the YAML
    # string loaded at the top of this file.
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
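# Usage sketch (editor's addition, not part of the test suite): the same
# `ReadMe` API exercised above can validate a dataset card string directly.
#
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # raises ValueError when the card is malformed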
| 536 | 1 |
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively try every sum of unique natural numbers raised to `power`."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Count the ways `needed_sum` can be written as a sum of unique natural numbers, each raised to `power`."""
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
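# Illustrative check (editor's addition; assumes the reconstructed `solve`
# above). 100 can be written as a sum of unique squares in exactly three ways:
# 10**2, 6**2 + 8**2, and 1**2 + 3**2 + 4**2 + 5**2 + 7**2.
#
#     assert solve(100, 2) == 3
#     assert solve(13, 2) == 1  # 13 == 2**2 + 3**2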
| 612 |
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
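# Quick illustration (editor's addition, not part of the test suite): two
# `BatchSamplerShard`s split one `BatchSampler` between two processes batch by
# batch, which is what `check_batch_sampler_shards` above verifies.
#
#     sampler = BatchSampler(range(6), batch_size=1, drop_last=False)
#     shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#     assert list(shards[0]) == [[0], [2], [4]]
#     assert list(shards[1]) == [[1], [3], [5]]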
| 612 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowercase ( ) -> int:
__lowerCAmelCase : Union[str, Any] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" ,type=__snake_case ,default=1 ,help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" ,type=__snake_case ,help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) ,)
# rest from the training program
parser.add_argument("training_script_args" ,nargs=__snake_case )
return parser.parse_args()
def _lowercase ( ) -> str:
__lowerCAmelCase : int = parse_args()
# Import training_script as a module.
__lowerCAmelCase : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowerCAmelCase : Any = script_fpath.stem
__lowerCAmelCase : List[Any] = importlib.import_module(__snake_case )
# Patch sys.argv
__lowerCAmelCase : str = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main() | 705 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case : Tuple = logging.getLogger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'masked_bert'
def __init__( self: str , _SCREAMING_SNAKE_CASE: Tuple=3_0522 , _SCREAMING_SNAKE_CASE: Optional[int]=768 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Any=12 , _SCREAMING_SNAKE_CASE: Optional[int]=3072 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Dict=512 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: int=1e-12 , _SCREAMING_SNAKE_CASE: int=0 , _SCREAMING_SNAKE_CASE: Tuple="topK" , _SCREAMING_SNAKE_CASE: Any="constant" , _SCREAMING_SNAKE_CASE: int=0.0 , **_SCREAMING_SNAKE_CASE: Any , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : Dict = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_probs_dropout_prob
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : str = type_vocab_size
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : Optional[int] = layer_norm_eps
__lowerCAmelCase : Tuple = pruning_method
__lowerCAmelCase : Union[str, Any] = mask_init
__lowerCAmelCase : str = mask_scale | 615 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in concrete algorithms.
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = [0]
__UpperCamelCase : Optional[int] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__UpperCamelCase : Union[str, Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__UpperCamelCase : List[Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__UpperCamelCase : Any = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
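# Editor's sanity check on the sample network above: the only path from
# vertex 0 to vertex 3 is 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so by
# the max-flow min-cut theorem the printed maximum flow should be 6 (the
# bottleneck on edge 1 -> 2).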
| 468 |
"""simple docstring"""
from __future__ import annotations
def a ( __UpperCAmelCase : list ) -> list:
if len(__UpperCAmelCase ) == 0:
return []
__magic_name__, __magic_name__: List[str] = min(__UpperCAmelCase ), max(__UpperCAmelCase )
__magic_name__: Optional[int] = int(max_value - min_value ) + 1
__magic_name__: list[list] = [[] for _ in range(__UpperCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(__UpperCAmelCase )
return [v for bucket in buckets for v in sorted(__UpperCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 96 | 0 |
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, using a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
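# Editor's sketch (not in the original): the float cube root above can lose
# precision for large integers. An integer-checked variant, assuming n >= 0,
# rounds the approximate root and verifies it exactly:
def perfect_cube_exact(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root**3 == n


assert perfect_cube_exact(10**18)  # (10**6) ** 3
assert not perfect_cube_exact(10**18 + 1)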
| 713 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 205 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/feature extractor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
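# Example invocation (editor's sketch; the dataset and hyperparameters are
# illustrative, not prescribed by this script):
#
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb --dataset_config_name ks \
#         --output_dir ./wav2vec2-base-ks \
#         --do_train --do_eval \
#         --learning_rate 3e-5 --max_length_seconds 1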
| 103 |
"""LDM super-resolution pipeline (VQ-VAE + U-Net + scheduler)."""

import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
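# Usage sketch (editor's addition). The checkpoint name is an assumption —
# substitute whatever LDM super-resolution weights you actually have:
#
#     import PIL.Image
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")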
| 215 | 0 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def snake_case ( UpperCamelCase__ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
lowerCamelCase : Optional[int] = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Tuple[int, ...] ) -> Tuple[int, ...]:
lowerCamelCase : str = []
for d in reversed(UpperCamelCase__ ):
idx.append(flat_idx % d )
lowerCamelCase : Any = flat_idx // d
return tuple(reversed(UpperCamelCase__ ) )
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("""Must provide at least one input""")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(da: dict, db: dict) -> None:
                for k, v in da.items():
                    if isinstance(v, dict):
                        assign(v, db[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for xa, xb in zip(out, output_chunk):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("""Not supported""")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    """simple docstring"""
    def __init__(self, max_chunk_size: int = 512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("""Tuning chunk size...""")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac_a: Iterable, ac_b: Iterable) -> bool:
        consistent = True
        for aa, ab in zip(ac_a, ac_b):
            assert type(aa) == type(ab)
            if isinstance(aa, (list, tuple)):
                consistent &= self._compare_arg_caches(aa, ab)
            elif isinstance(aa, dict):
                aa_items = [v for _, v in sorted(aa.items(), key=lambda x: x[0])]
                ab_items = [v for _, v in sorted(ab.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(aa_items, ab_items)
            else:
                consistent &= aa == ab
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, there is no precomputed value to reuse
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 42 |
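# A minimal sketch of the chunking idea implemented above: run a layer over a
# large flattened batch in fixed-size slices to bound peak memory. This is the
# core of chunk_layer, minus the tensor-tree plumbing and output preallocation.
import torch

def toy_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2 + 1

x = torch.randn(10_000, 8)
chunk_size = 1_024
out = torch.empty_like(x)
for i in range(0, x.shape[0], chunk_size):
    out[i : i + chunk_size] = toy_layer(x[i : i + chunk_size])
assert torch.allclose(out, toy_layer(x))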
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """simple docstring"""
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, """tf""", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, """pt""", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""") as vocab_file:
            vocab_file.write("""\n""".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, """pt""", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, """tf""", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""")
    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, """pt""", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""")
    def _test_export(self, test_model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("""model.onnx""")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, test_model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
        self._test_infer_dynamic_axis(model, tokenizer, """pt""")
    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
        self._test_infer_dynamic_axis(model, tokenizer, """tf""")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: """batch""", 1: """sequence"""})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""], {0: """batch""", 1: """sequence"""})
        self.assertDictEqual(shapes["""output_1"""], {0: """batch"""})
    def test_ensure_valid_input(self):
        input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["""input_ids"""])
        self.assertEqual(ordered_input_names[0], """input_ids""")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx"""), """-test""")
        self.assertEqual("""/home/something/my_fake_model-test.onnx""", generated.as_posix())
| 42 | 1 |
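# Hedged usage sketch for the helper exercised by the tests above. Assumes
# network access for the checkpoint and that convert() takes
# (framework, model, output, opset, tokenizer); adjust to your environment.
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers.convert_graph_to_onnx import convert

with TemporaryDirectory() as tmp:
    onnx_path = Path(tmp) / "model.onnx"
    convert("pt", "bert-base-cased", onnx_path, 12)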
"""simple docstring"""
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    if decimal == 0:
        # edge case: the loop below never runs for 0
        return "0x0"
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
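# Quick sanity checks against Python's built-in hex() (a hedged example, not
# part of the original module):
assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
assert decimal_to_hexadecimal(-42) == hex(-42) == "-0x2a"
assert decimal_to_hexadecimal(0) == hex(0) == "0x0"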
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 |
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n)
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''')
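def _demo() -> None:
    # Hedged, self-contained demo of the helpers above. The input file is a
    # whitespace-separated edge list, one "<node_a> <node_b> <distance>" per
    # line; node names must be single characters because
    # generate_first_solution() reads only the first byte as the start node.
    import os
    import tempfile
    edges = ["a b 20", "a c 18", "a d 22", "b c 10", "b d 11", "c d 12"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("\n".join(edges) + "\n")
        path = f.name
    try:
        neighbours = generate_neighbours(path)
        first_solution, distance = generate_first_solution(path, neighbours)
        print(first_solution, distance)  # e.g. ['a', 'c', 'b', 'd', 'a'] 61
    finally:
        os.remove(path)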
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 571 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 182 |
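# The block above is lazy-import plumbing: submodules are only imported when
# one of their exported names is first accessed. A minimal, hypothetical
# sketch of the same idea (not transformers' actual _LazyModule):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported name -> submodule that defines it
        self._origin = {n: mod for mod, names in import_structure.items() for n in names}
    def __getattr__(self, attr):
        if attr not in self._origin:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._origin[attr]}")
        return getattr(submodule, attr)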
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    """simple docstring"""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(F'\n{mode.title()}ed message:')
    print(translated)
def encrypt_message(key, message) -> str:
    """simple docstring"""
    return translate_message(key, message, "encrypt")
def decrypt_message(key, message) -> str:
    """simple docstring"""
    return translate_message(key, message, "decrypt")
def translate_message(key, message, mode) -> str:
    """simple docstring"""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
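# A hedged usage example for the helpers above (the key comes first in this
# reconstruction's argument order):
assert encrypt_message("KEY", "HELLO") == "RIJVS"
assert decrypt_message("KEY", "RIJVS") == "HELLO"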
if __name__ == "__main__":
main() | 182 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ), )
| 79 |
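# Hedged usage sketch for the processor under test (assumes the default
# VivitImageProcessor config: shortest-edge-256 resize + 224x224 center crop):
import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor()
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)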
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            '''files, respectively''')
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {fa * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'''conll_score''': conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        allmetrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=allmetrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 29 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc, x_start, x_end, steps = 100, ):
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x_next = (x_end - x_start) / steps + xa
        fx_next = fnc(x_next)
        length += math.hypot(x_next - xa, fx_next - fxa)
        # Increment step
        xa = x_next
        fxa = fx_next
    return length
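# Quick sanity check (hedged example): a straight line f(x) = x on [0, 1] has
# arc length sqrt(2), and the piecewise-linear approximation is exact for it.
assert math.isclose(line_length(lambda x: x, 0, 1, 10), math.sqrt(2))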
if __name__ == "__main__":
    def f(x):
        '''simple docstring'''
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100_000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 717 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self):
        return len(self.__components)
    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float):
        ...
    @overload
    def __mul__(self, other: Vector):
        ...
    def __mul__(self, other: float | Vector):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self):
        return Vector(self.__components)
    def component(self, i: int):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension):
    '''simple docstring'''
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    '''simple docstring'''
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    '''simple docstring'''
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    '''simple docstring'''
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self):
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")
    def __sub__(self, other: Matrix):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float):
        ...
    @overload
    def __mul__(self, other: Vector):
        ...
    def __mul__(self, other: float | Vector):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self):
        return self.__height
    def width(self):
        return self.__width
    def component(self, x: int, y: int):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n):
    '''simple docstring'''
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width, height, a, b):
    '''simple docstring'''
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
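# Quick sanity checks (hedged examples) for the linear-algebra helpers above:
x = Vector([1, 2, 3])
y = Vector([3, 2, 1])
assert x * y == 10  # dot product
assert str(x + y) == "(4,4,4)"
assert unit_basis_vector(3, 1) * x == 2  # picks out the second component
m = Matrix([[1, 0], [0, 2]], 2, 2)
assert m.determinant() == 2
assert str(m * Vector([1, 1])) == "(1,2)"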
| 140 | 0 |