from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Extracts Mel Frequency Spectral Coefficient (MFSC) features from raw speech."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # window and stride sizes in samples (`win_length` and `hop_length` are in milliseconds)
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts MFSC features for one (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # mean/variance statistics are computed on the unpadded part of the sequence only
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
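
# Illustrative usage sketch (not part of the original module; instantiating the class
# directly with its defaults is an assumption -- in practice the extractor is usually
# loaded from a pretrained checkpoint via `from_pretrained`):
#
#     extractor = MCTCTFeatureExtractor()
#     one_second_of_silence = np.zeros(16_000, dtype=np.float32)
#     batch = extractor(one_second_of_silence, sampling_rate=16_000, padding=True, return_tensors="np")
#     print(batch["input_features"].shape)  # (batch, num_frames, feature_size)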
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
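# Note on the lazy-module pattern above (comment added for clarity): downstream code can
# simply write
#     from transformers import GitConfig, GitProcessor, GitForCausalLM
# and the heavy `modeling_git` submodule is only imported the first time one of its
# attributes is accessed, which keeps `import transformers` fast.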
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # expected output shape: (batch_size, num_channels, height // 32, width // 32)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
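
# How to run (illustrative; the path follows the usual transformers test layout and is an
# assumption about this checkout):
#     python -m pytest tests/models/regnet/test_modeling_flax_regnet.py
# The integration test above is marked @slow, so it is skipped unless RUN_SLOW=1 is set.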
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class holding a conversation and its history: the unprocessed new user input,
    the past user inputs, and the model's generated responses.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Add a user input for the next round, optionally overwriting an unprocessed one."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the unprocessed user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        """Iterate over all `(is_user, text)` pairs of the conversation, history first."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union["Conversation", List["Conversation"]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
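
# Usage sketch (added for illustration; the checkpoint name is an assumption -- any
# conversational model such as a DialoGPT checkpoint works):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])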
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


# In each function below the argument names double as the query parameters sent to the
# API, which is why the whole of `locals()` is passed as `params`.
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer batch size that caps the row group size of the resulting parquet files,
    based on the heaviest feature type present (images, audio, raw binary)."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset's Arrow table to an open binary file object as parquet,
        returning the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
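
# Usage sketch (added for illustration; this reader/writer pair backs the public
# `Dataset.from_parquet` and `Dataset.to_parquet` helpers, roughly):
#
#     ds = ParquetDatasetReader("data.parquet", split="train").read()
#     num_bytes = ParquetDatasetWriter(ds, "copy.parquet", batch_size=1000).write()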
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Numerically solve the ODE y' = f(x, y) with the classic fourth-order Runge-Kutta
    method, starting from y(x0) = y0 and stepping with step size h until x_end."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
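
# Worked example (added for illustration): integrate y' = y with y(0) = 1 over [0, 3].
# The exact solution is e**x, so the last value should be close to e**3 ~= 20.0855.
if __name__ == "__main__":
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)
    print(approx[-1])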
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Turns raw audio into padded log-mel spectrograms plus an audio patch mask."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute a log-mel spectrogram clipped and rescaled to the range [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
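
# Usage sketch (added for illustration; exact output shapes depend on the defaults above):
#
#     extractor = TvltFeatureExtractor()
#     audio = np.random.randn(44_100).astype(np.float32)  # one second at 44.1 kHz
#     inputs = extractor(audio, sampling_rate=44_100, return_tensors="np")
#     print(inputs["audio_values"].shape)  # (batch, 1, time, feature_size)
#     print(inputs["audio_mask"].shape)    # (batch, num_patches)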
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
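
# Usage sketch (added for illustration): the template maps an arbitrary text column of a
# dataset onto the canonical "text" column expected by language-modeling code.
#
#     template = LanguageModeling(text_column="content")
#     template.column_mapping  # -> {"content": "text"}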
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the remainder over the first groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, slicing every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of `_split_gen_kwargs`: concatenate the list values of every gen_kwargs."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs."""
    # We must shuffle all the lists, and lists of the same size must use the same permutation,
    # so that entangled lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
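
# Worked example (added for illustration): 10 shards over at most 3 jobs gives groups
# of sizes 4, 3 and 3, and the gen_kwargs are split accordingly.
if __name__ == "__main__":
    gen_kwargs = {"filepaths": [f"file_{i}.txt" for i in range(10)], "split": "train"}
    print(_number_of_shards_in_gen_kwargs(gen_kwargs))  # 10
    print(_distribute_shards(num_shards=10, max_num_jobs=3))  # [range(0, 4), range(4, 7), range(7, 10)]
    print([len(kwargs["filepaths"]) for kwargs in _split_gen_kwargs(gen_kwargs, 3)])  # [4, 3, 3]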
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element, scan the remainder of the array. O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute-force idea, iterating with enumerate and slices. Still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: walk the array right to left, popping smaller
    elements so the stack top is always the next greater element. O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Walk the graph outwards from the source vertex, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Reconstruct the path from the source vertex by following parent links backwards."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
class SubArray:
    """Maximum contiguous sub-array sum, via Kadane's algorithm."""

    def __init__(self, arr: str) -> None:
        # the input is a comma-separated string of numbers
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # sum_value[i]: best sum of a sub-array ending at i; rear[i]: best sum seen so far
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
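
# Worked example (added for illustration): for the input "1,2,3,-2,5" the best
# contiguous sum is 1 + 2 + 3 - 2 + 5 = 9.
#
#     SubArray("1,2,3,-2,5").solve_sub_array()  # -> 9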
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem) composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """RegNet shortcut: a 1x1 convolution used to project the residual to the right size
    and optionally downsample the input."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation layer: https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: a ResNet-style bottleneck block with grouped convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self,
        hidden_state: tf.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
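
# Usage sketch (added for illustration; the checkpoint matches the docstring constants
# above, and `image` stands in for any PIL image):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     print(model.config.id2label[int(tf.math.argmax(logits, axis=-1))])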
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a_ = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Sets a given tensor (parameter or buffer) of a module on a specific device, quantizing it with
    `bitsandbytes` when the target parameter is an 8-bit or 4-bit param.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Private recursive helper for `replace_with_bnb_linear`; returns the converted model and a flag telling
    whether at least one linear layer was replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Converts a model's `nn.Linear`/`Conv1D` layers into `bitsandbytes` 8-bit or 4-bit linear layers, keeping
    the modules listed in `modules_to_not_convert` (by default the `lm_head`) in full precision.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Returns the module names that should be kept in full precision: tied weights and, for models with a head,
    the last (output) module.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
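

# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal end-to-end flow, assuming a CUDA device is available and that the
# public `BitsAndBytesConfig` is used; the "gpt2" checkpoint is a placeholder:
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   keep_in_fp32 = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=keep_in_fp32, quantization_config=bnb_config
#   )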
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
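

# --- Usage sketch (added for illustration) ------------------------------------
# Generating ONNX dummy inputs for the language model; the checkpoint name comes
# from the archive map above, the `task` argument is an assumption:
#
#   from transformers import PerceiverTokenizer
#
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig(), task="default")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy_inputs contains "inputs" and "attention_mask" tensors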
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's weights with those stored in the T5X checkpoint."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
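
# Example invocation (the script name and all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention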
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
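

# Illustration of the consistency check above (values shown are the defaults):
#
#   MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))   # OK: one kernel per conv layer
#   MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))   # raises ValueError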
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
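
# With this layout, `import transformers.models.altclip` stays cheap: the
# torch-backed classes are only imported on first attribute access, e.g.
#
#   from transformers.models.altclip import AltCLIPModel  # triggers the real import
#
# and a missing torch install merely leaves the modeling entries out of
# `_import_structure` instead of failing at import time.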
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int64)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremely long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremely long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
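

# Note on the batching test above: decoder-only generation requires left
# padding so that the final position of every row is a real token. The
# relevant setup, distilled (illustrative):
#
#   tokenizer.padding_side = "left"
#   inputs = tokenizer(sentences, return_tensors="tf", padding=True)
#   outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])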
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
    def test_call_numpy(self):
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
    def test_call_pytorch(self):
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
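

# --- Usage sketch (added for illustration; the exact output shape depends on
# the processor's resize/pad settings) -----------------------------------------
#
#   from PIL import Image
#   from transformers import DonutImageProcessor
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   image = Image.new("RGB", (30, 30))
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   # with the tester defaults above this should yield a (1, 3, 18, 20) tensor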
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''')
    def test_inputs_embeds(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def test_multi_gpu_data_parallel_forward(self):
pass
    def test_model_common_attributes(self):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = model_class(A_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCAmelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear))
    def test_forward_signature(self):
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] = model_class(A_)
lowerCAmelCase_ : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
    def test_model(self):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_)
    def test_for_masked_lm(self):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_)
    def test_for_image_classification(self):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_)
    def test_for_semantic_segmentation(self):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_)
    def test_training(self):
if not self.model_tester.is_training:
return
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_), BeitForMaskedImageModeling]:
continue
lowerCAmelCase_ : Optional[Any] = model_class(A_)
model.to(A_)
model.train()
lowerCAmelCase_ : int = self._prepare_for_class(A_ , A_ , return_labels=A_)
lowerCAmelCase_ : Tuple = model(**A_).loss
loss.backward()
    def test_training_gradient_checkpointing(self):
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase_ : str = model_class(A_)
model.gradient_checkpointing_enable()
model.to(A_)
model.train()
lowerCAmelCase_ : Any = self._prepare_for_class(A_ , A_ , return_labels=A_)
lowerCAmelCase_ : Dict = model(**A_).loss
loss.backward()
    def test_initialization(self):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Dict = _config_zero_init(A_)
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(config=A_)
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
    def test_model_from_pretrained(self):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = BeitModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
lowerCAmelCase_ : List[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''').to(A_)
lowerCAmelCase_ : Optional[Any] = self.default_image_processor
lowerCAmelCase_ : Any = prepare_img()
lowerCAmelCase_ : Any = image_processor(images=A_ , return_tensors='''pt''').pixel_values.to(A_)
# prepare bool_masked_pos
lowerCAmelCase_ : Optional[int] = torch.ones((1, 1_9_6) , dtype=torch.bool).to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(pixel_values=A_ , bool_masked_pos=A_)
lowerCAmelCase_ : Union[str, Any] = outputs.logits
# verify the logits
lowerCAmelCase_ : str = torch.Size((1, 1_9_6, 8_1_9_2))
self.assertEqual(logits.shape , A_)
lowerCAmelCase_ : Tuple = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(A_)
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
lowerCAmelCase_ : Optional[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''').to(A_)
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : str = prepare_img()
lowerCAmelCase_ : Dict = image_processor(images=A_ , return_tensors='''pt''').to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**A_)
lowerCAmelCase_ : Optional[int] = outputs.logits
# verify the logits
lowerCAmelCase_ : Union[str, Any] = torch.Size((1, 1_0_0_0))
self.assertEqual(logits.shape , A_)
lowerCAmelCase_ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108]).to(A_)
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4))
lowerCAmelCase_ : Optional[Any] = 2_8_1
self.assertEqual(logits.argmax(-1).item() , A_)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
lowerCAmelCase_ : Union[str, Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''').to(
A_)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=A_ , return_tensors='''pt''').to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : int = model(**A_)
lowerCAmelCase_ : Dict = outputs.logits
# verify the logits
lowerCAmelCase_ : List[str] = torch.Size((1, 2_1_8_4_1))
self.assertEqual(logits.shape , A_)
lowerCAmelCase_ : Tuple = torch.tensor([1.6881, -0.2787, 0.5901]).to(A_)
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4))
lowerCAmelCase_ : Union[str, Any] = 2_3_9_6
self.assertEqual(logits.argmax(-1).item() , A_)
@slow
    def test_inference_semantic_segmentation(self):
lowerCAmelCase_ : Any = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
lowerCAmelCase_ : Optional[int] = model.to(A_)
lowerCAmelCase_ : str = BeitImageProcessor(do_resize=A_ , size=6_4_0 , do_center_crop=A_)
lowerCAmelCase_ : List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
lowerCAmelCase_ : Any = Image.open(ds[0]['''file'''])
lowerCAmelCase_ : List[str] = image_processor(images=A_ , return_tensors='''pt''').to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**A_)
lowerCAmelCase_ : List[Any] = outputs.logits
# verify the logits
lowerCAmelCase_ : str = torch.Size((1, 1_5_0, 1_6_0, 1_6_0))
self.assertEqual(logits.shape , A_)
lowerCAmelCase_ : Union[str, Any] = version.parse(PIL.__version__) < version.parse('''9.0.0''')
if is_pillow_less_than_a:
lowerCAmelCase_ : Optional[int] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=A_ , )
else:
lowerCAmelCase_ : Optional[int] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
lowerCAmelCase_ : List[str] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
lowerCAmelCase_ : Optional[int] = model.to(A_)
lowerCAmelCase_ : List[Any] = BeitImageProcessor(do_resize=A_ , size=6_4_0 , do_center_crop=A_)
lowerCAmelCase_ : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
lowerCAmelCase_ : Any = Image.open(ds[0]['''file'''])
lowerCAmelCase_ : int = image_processor(images=A_ , return_tensors='''pt''').to(A_)
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(**A_)
lowerCAmelCase_ : str = outputs.logits.detach().cpu()
lowerCAmelCase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(5_0_0, 3_0_0)])
lowerCAmelCase_ : Optional[Any] = torch.Size((5_0_0, 3_0_0))
self.assertEqual(segmentation[0].shape , A_)
lowerCAmelCase_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A_)
lowerCAmelCase_ : int = torch.Size((1_6_0, 1_6_0))
self.assertEqual(segmentation[0].shape , A_)
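

# --- Usage sketch (added for illustration) ------------------------------------
# Distilled from the last test above: turning segmentation logits into
# per-pixel label maps with the image processor:
#
#   inputs = image_processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   maps = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
#   # maps[0] has shape (500, 300); without `target_sizes` the logits
#   # resolution (here 160 x 160) is kept.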
"""Generate all permutations of a list, iteratively (`permute`) and by backtracking (`permute2`)."""


def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations by rotating the list and recursing.

    >>> sorted(permute([1, 2]))
    [[1, 2], [2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations by in-place swapping (backtracking).

    >>> sorted(permute2([1, 2]))
    [[1, 2], [2, 1]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
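
# Cross-check against the standard library (illustrative):
#
#   import itertools
#   assert sorted(permute2([1, 2, 3])) == sorted(map(list, itertools.permutations([1, 2, 3])))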
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
SCREAMING_SNAKE_CASE_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE_ : Dict = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE_ : Dict = F'[unused{i}]'
SCREAMING_SNAKE_CASE_ : List[Any] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 12
SCREAMING_SNAKE_CASE_ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A_ )
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Dict = None
return state
def __setstate__( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : List[str] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self ):
return len(self.sp_model ) + self.fairseq_offset
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , __lowerCAmelCase ):
return self.sp_model.encode(A_ , out_type=A_ )
def __A ( self , __lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __A ( self , __lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = ''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : str = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
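

# Standalone sketch of the fairseq/spm id alignment documented in __init__ above:
# with fairseq_offset = 12, the first "real" spm piece "," (spm id 3) lands at
# embedding position 15 (hypothetical helper, not part of the tokenizer API).
_FAIRSEQ_OFFSET = 12


def _spm_id_to_fairseq_id(spm_id):
    return spm_id + _FAIRSEQ_OFFSET


assert _spm_id_to_fairseq_id(3) == 15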
| 345 |
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
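

# Quick sanity checks: the origin never escapes (distance saturates at 1.0),
# while (1, 1) diverges on the very first step (distance 0.0).
assert get_distance(0, 0, 50) == 1.0
assert get_distance(1, 1, 50) == 0.0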
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 3 | 0 |
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit collapses the superposition, leaving every
    # other qubit in the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
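

def check_entanglement(qubits: int = 2) -> bool:
    # Hedged sanity helper (assumes a working qiskit + Aer install): a GHZ-style
    # circuit only ever collapses to the all-zeros or the all-ones bitstring.
    counts = quantum_entanglement(qubits)
    return set(counts) <= {"0" * qubits, "1" * qubits}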
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
| 17 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
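

# A minimal standalone sketch of the deferred-import idea behind _LazyModule
# (illustrative only; this is not the transformers implementation):
import importlib


class _TinyLazyModule:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:  # the real import only happens on first attribute access
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


_lazy_json = _TinyLazyModule("json")  # nothing imported yet
assert _lazy_json.dumps({"a": 1}) == '{"a": 1}'  # import triggered here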
| 3 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
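

# Quick numeric checks: the peak at x = mu is 1 / sqrt(2 * pi * sigma**2),
# and the curve is symmetric around mu.
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12
assert gaussian(1.0) == gaussian(-1.0)
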
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 523 |
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
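

# Consistency sketch on the demo graph: the path returned by bfs_shortest_path
# has exactly bfs_shortest_path_distance edges.
_path = bfs_shortest_path(demo_graph, "G", "D")
assert len(_path) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D") == 4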
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , __lowercase : List[Any] , __lowercase : int=7 , __lowercase : Optional[int]=3 , __lowercase : str=30 , __lowercase : Union[str, Any]=4_00 , __lowercase : Any=True , __lowercase : Tuple=None , __lowercase : Tuple=True , __lowercase : Dict=[0.5, 0.5, 0.5] , __lowercase : List[Any]=[0.5, 0.5, 0.5] , __lowercase : Any=True , __lowercase : Optional[Any]=1 / 2_55 , __lowercase : Tuple=True , ):
"""simple docstring"""
snake_case_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_pad
def snake_case__ ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case__ ( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : List[str]=False ):
"""simple docstring"""
if not batched:
snake_case_ = image_inputs[0]
if isinstance(A_ , Image.Image ):
snake_case_ , snake_case_ = image.size
else:
snake_case_ , snake_case_ = image.shape[1], image.shape[2]
if w < h:
snake_case_ = int(self.size["shortest_edge"] * h / w )
snake_case_ = self.size["shortest_edge"]
elif w > h:
snake_case_ = self.size["shortest_edge"]
snake_case_ = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ = self.size["shortest_edge"]
snake_case_ = self.size["shortest_edge"]
else:
snake_case_ = []
for image in image_inputs:
snake_case_ , snake_case_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ = max(A_ , key=lambda __lowercase : item[0] )[0]
snake_case_ = max(A_ , key=lambda __lowercase : item[1] )[1]
return expected_height, expected_width
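

# Standalone sketch of the shortest-edge resize rule exercised by
# get_expected_values above (hypothetical helper mirroring the non-batched branch):
def _expected_shortest_edge_size(w, h, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _expected_shortest_edge_size(30, 400) == (240, 18)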
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = DetaImageProcessor if is_vision_available() else None
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = DetaImageProcessingTester(self )
@property
def snake_case__ ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "do_rescale" ) )
self.assertTrue(hasattr(A_ , "do_pad" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
def snake_case__ ( self : Any ):
"""simple docstring"""
pass
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
snake_case_ = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(A_ , return_tensors="pt" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(A_ , return_tensors="pt" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case_ = DetaImageProcessor()
snake_case_ = image_processing(images=A_ , annotations=A_ , return_tensors="pt" )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A_ )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
snake_case_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
@slow
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ = DetaImageProcessor(format="coco_panoptic" )
snake_case_ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A_ )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
snake_case_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
# verify class_labels
snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
# verify masks
snake_case_ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
| 376 |
'''simple docstring'''
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
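

# Toy numeric sketch of the cumulative-histogram mapping the stretch loop builds
# (rounding details simplified): counts [2, 1, 1] over L = 4 gray levels give
# cumulative s_k = 0.5, 0.75, 1.0, i.e. the levels map to round((L - 1) * s_k) = 2, 2, 3.
_counts = [2, 1, 1]
_sk = 0.0
_levels = []
for _c in _counts:
    _sk += _c / sum(_counts)
    _levels.append(round(3 * _sk))
assert _levels == [2, 2, 3]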
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations

import bisect


def bisect_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
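

# Duplicate-handling checks: bisect_left returns the first slot for an existing
# value, bisect_right the slot just past its last occurrence.
assert bisect_left([0, 1, 1, 1, 2], 1) == 1
assert bisect_right([0, 1, 1, 1, 2], 1) == 4
assert binary_search([0, 5, 7, 10, 15], 6) is None
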
if __name__ == "__main__":
A__ : Tuple = input('''Enter numbers separated by comma:\n''').strip()
A__ : int = sorted(int(item) for item in user_input.split(''','''))
A__ : Optional[Any] = int(input('''Enter a single number to be found in the list:\n'''))
A__ : Any = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 286 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """unispeech-sat"""
def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="group" , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=128 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=320 , A_=2 , A_=0.1 , A_=100 , A_=256 , A_=256 , A_=0.1 , A_="mean" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=504 , **A_ , )-> Tuple:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 3 | 0 |
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # reset both diagonals so a stale value from a previous iteration
            # can't leak into max() below
            diag_product = 0
            anti_diag_product = 0
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                anti_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, diag_product, anti_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
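

# Quick check on a 4x4 grid: the best run is the bottom row, 13 * 14 * 15 * 16 = 43680.
assert largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43680
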
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 297 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_=100 , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=[0, 1, 2, 3] , )-> Any:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = 100
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = out_indices
UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_( ):
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
# prepare bool_masked_pos
UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
| 3 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A_(snake_case_ ):
"""simple docstring"""
def __init__( self , A , A = None , A = None , A = True , A = None , A = False , A = None , A = True , A = "arrow" , **A , ):
super().__init__(
split=A_ , features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , **A_ , )
_lowerCamelCase : Union[str, Any] = load_from_cache_file
_lowerCamelCase : Optional[Any] = file_format
_lowerCamelCase : int = Spark(
df=A_ , features=A_ , cache_dir=A_ , working_dir=A_ , **A_ , )
def _lowerCAmelCase ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowerCamelCase : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=A_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 437 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
return True
def UpperCAmelCase_ ( self , *A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def UpperCAmelCase_ ( self , A_ , **A_ )-> int:
'''simple docstring'''
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , A_ , A_=ReturnType.TEXT , A_=False )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """summary"""
def __call__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """translation"""
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None )-> Dict:
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , **A_ )-> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility: passing src_lang/tgt_lang directly is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
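

# Minimal standalone sketch of the backward-compatible task-name parsing done in
# the translation pipeline's _sanitize_parameters above ("translation_XX_to_YY"):
_task = "translation_en_to_fr"
_items = _task.split("_")
assert len(_items) == 4 and (_items[1], _items[3]) == ("en", "fr")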
| 3 | 0 |
def perfect_cube(n: int) -> bool:
    """simple docstring"""
    # round() guards against float error: e.g. 64 ** (1 / 3) evaluates to 3.999...
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
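    # Sanity checks, including 64, where the unrounded float cube root
    # (3.999...) would make the comparison fail:
    assert perfect_cube(27) and perfect_cube(64)
    assert not perfect_cube(4)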
| 611 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
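
# `KwargsHandler.to_kwargs()` exercised above essentially diffs a dataclass
# instance against its own defaults. A minimal re-implementation sketch of
# that idea (the class name and fields here are illustrative assumptions):
from dataclasses import fields


@dataclass
class TinyHandler:
    a: int = 0
    b: bool = False

    def to_kwargs(self):
        default = TinyHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }


assert TinyHandler(a=2).to_kwargs() == {"a": 2}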
| 3 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
A_ = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
A_ = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
A_ = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
A_ = sorted(arg_to_scheduler.keys())
A_ = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowercase_ ( pl.LightningModule ):
def __init__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Tuple="base" , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Union[str, Any] , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(A_ )
snake_case__ : Optional[int] = 0
snake_case__ : List[Any] = Path(self.hparams.output_dir )
snake_case__ : int = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
snake_case__ : str = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=A_ , **A_ , )
else:
snake_case__ : Any = config
snake_case__ : str = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , A_ , A_ ):
assert hasattr(self.config , A_ ), F"model config doesn\'t have a `{p}` attribute"
setattr(self.config , A_ , getattr(self.hparams , A_ ) )
if tokenizer is None:
snake_case__ : int = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=A_ , )
else:
snake_case__ : Tuple = tokenizer
snake_case__ : List[str] = MODEL_MODES[mode]
if model is None:
snake_case__ : List[str] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=A_ , )
else:
snake_case__ : Union[str, Any] = model
def _lowerCAmelCase ( self : str , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ):
snake_case__ : str = self.model_type.from_pretrained(*A_ , **A_ )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
snake_case__ : int = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
snake_case__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : Any = self.model
snake_case__ : Optional[Any] = ['bias', 'LayerNorm.weight']
snake_case__ : int = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
snake_case__ : List[str] = Adafactor(
A_ , lr=self.hparams.learning_rate , scale_parameter=A_ , relative_step=A_ )
else:
snake_case__ : List[Any] = AdamW(
A_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
snake_case__ : Optional[int] = optimizer
snake_case__ : str = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowerCAmelCase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ):
return self.validation_step(A_ , A_ )
def _lowerCAmelCase ( self : str , __lowerCamelCase : Optional[Any] ):
return self.validation_end(A_ )
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
snake_case__ : List[str] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] ):
if stage == "test":
snake_case__ : Optional[Any] = len(self.test_dataloader().dataset )
else:
snake_case__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=A_ )
snake_case__ : Optional[int] = len(self.train_dataloader().dataset )
def _lowerCAmelCase ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Any = False ):
raise NotImplementedError('You must implement this for your task' )
def _lowerCAmelCase ( self : List[str] ):
return self.train_loader
def _lowerCAmelCase ( self : Dict ):
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=A_ )
def _lowerCAmelCase ( self : Dict ):
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=A_ )
def _lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ):
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
A_ , list(filter(A_ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def _lowerCAmelCase ( self : Any , __lowerCamelCase : Dict ):
snake_case__ : Any = self.output_dir.joinpath('best_tfmr' )
snake_case__ : Optional[int] = self.step_count
self.model.save_pretrained(A_ )
self.tokenizer.save_pretrained(A_ )
@staticmethod
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
parser.add_argument(
'--model_name_or_path' , default=A_ , type=A_ , required=A_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=A_ , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=A_ , type=A_ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(A_ ).parent / 'test_run' / 'cache' ) , type=A_ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=A_ , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=A_ , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=A_ , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=A_ , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=A_ , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=A_ , metavar=A_ , type=A_ , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=A_ , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=A_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=A_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=A_ , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=A_ )
parser.add_argument('--train_batch_size' , default=32 , type=A_ )
parser.add_argument('--eval_batch_size' , default=32 , type=A_ )
parser.add_argument('--adafactor' , action='store_true' )
class lowercase_ ( pl.Callback ):
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase_ ( pl.Callback ):
def _lowerCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : List[str] ):
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(A_ )
class lowercase_ ( pl.Callback ):
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
snake_case__ : Optional[Any] = trainer.lr_schedulers[0]['scheduler']
snake_case__ : int = {F"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(A_ )
def _lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ):
rank_zero_info('***** Validation results *****' )
snake_case__ : List[str] = trainer.callback_metrics
# Log results
for key in sorted(A_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
def _lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ):
rank_zero_info('***** Test results *****' )
snake_case__ : Optional[Any] = trainer.callback_metrics
# Log and save results to file
snake_case__ : List[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(A_ , 'w' ) as writer:
for key in sorted(A_ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(A_ , str(metrics[key] ) ) )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'model_checkpoints' ) , type=__SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=__SCREAMING_SNAKE_CASE , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=__SCREAMING_SNAKE_CASE )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=__SCREAMING_SNAKE_CASE , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=__SCREAMING_SNAKE_CASE , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'dummy-train-data' ) , type=__SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[] , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
pl.seed_everything(args.seed )
# init model
snake_case__ : Optional[int] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
# add custom checkpoints
if checkpoint_callback is None:
snake_case__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__SCREAMING_SNAKE_CASE )
if logging_callback is None:
snake_case__ : Optional[int] = LoggingCallback()
snake_case__ : Union[str, Any] = {}
    if args.fp16:
snake_case__ : Any = 16
if args.gpus > 1:
snake_case__ : Any = 'auto'
snake_case__ : Optional[int] = 'ddp'
snake_case__ : Any = args.accumulate_grad_batches
snake_case__ : Any = None
snake_case__ : Any = 'auto'
snake_case__ : str = pl.Trainer.from_argparse_args(
__SCREAMING_SNAKE_CASE , weights_summary=__SCREAMING_SNAKE_CASE , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__SCREAMING_SNAKE_CASE , val_check_interval=1 , num_sanity_val_steps=2 , **__SCREAMING_SNAKE_CASE , )
if args.do_train:
trainer.fit(__SCREAMING_SNAKE_CASE )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
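
# The optimizer construction in `configure_optimizers` above follows the
# common pattern of excluding biases and LayerNorm weights from weight
# decay. A standalone sketch of that grouping (the model shape, decay value
# and learning rate are illustrative assumptions):
import torch as _torch


class _TinyModel(_torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = _torch.nn.Linear(8, 8)
        self.LayerNorm = _torch.nn.LayerNorm(8)


_model = _TinyModel()
_no_decay = ["bias", "LayerNorm.weight"]
_grouped_params = [
    {
        "params": [p for n, p in _model.named_parameters() if not any(nd in n for nd in _no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in _model.named_parameters() if any(nd in n for nd in _no_decay)],
        "weight_decay": 0.0,
    },
]
_optimizer = _torch.optim.AdamW(_grouped_params, lr=5e-5)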
| 270 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None )-> Tuple:
'''simple docstring'''
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
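
# `truncate` above keeps, per latent position, only the most probable
# codebook entries until their cumulative probability reaches
# `truncation_rate`, and maps everything else to log(0). A small numeric
# sketch of the same idea on a single distribution (the probabilities and
# the 0.8 rate are illustrative):
_log_p = torch.log(torch.tensor([0.5, 0.3, 0.15, 0.05]))
_sorted_log_p, _indices = torch.sort(_log_p, descending=True)
_keep = torch.exp(_sorted_log_p).cumsum(dim=0) < 0.8
_keep = torch.cat([torch.tensor([True]), _keep[:-1]])  # always keep the argmax
_truncated = torch.full_like(_log_p, float("-inf"))
_truncated[_indices[_keep]] = _log_p[_indices[_keep]]
# _truncated keeps log(0.5) and log(0.3); the two tail entries become -inf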
| 3 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( snake_case_ ):
'''simple docstring'''
a__ : Any = ["""image_processor""", """tokenizer"""]
a__ : Optional[int] = """ChineseCLIPImageProcessor"""
a__ : int = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , __lowercase=None , __lowercase=None , **__lowercase) -> int:
__UpperCamelCase :List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A_ , )
__UpperCamelCase :Optional[Any] = kwargs.pop('''feature_extractor''')
__UpperCamelCase :Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(A_ , A_)
__UpperCamelCase :List[Any] = self.image_processor
def __call__( self , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase) -> List[Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''')
if text is not None:
__UpperCamelCase :Optional[int] = self.tokenizer(A_ , return_tensors=A_ , **A_)
if images is not None:
__UpperCamelCase :Union[str, Any] = self.image_processor(A_ , return_tensors=A_ , **A_)
if text is not None and images is not None:
__UpperCamelCase :Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_) , tensor_type=A_)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> Optional[Any]:
return self.tokenizer.batch_decode(*A_ , **A_)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> Tuple:
return self.tokenizer.decode(*A_ , **A_)
@property
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Any = self.tokenizer.model_input_names
__UpperCamelCase :Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def UpperCamelCase__ ( self) -> List[str]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A_ , )
return self.image_processor_class
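
# Typical usage of the processor above (a sketch, left commented out
# because it downloads weights and configs from the Hub; the checkpoint
# name is an assumption for illustration):
# from transformers import ChineseCLIPProcessor
# from PIL import Image
# import numpy as np
#
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")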
| 167 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
A__ : List[Any] = range(2, 20 + 1)
A__ : Union[str, Any] = [10**k for k in range(ks[-1] + 1)]
A__ : dict[int, dict[int, list[list[int]]]] = {}
def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : str ):
lowerCAmelCase_ : Dict = sum(a_i[j] for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ) )
lowerCAmelCase_ : Union[str, Any] = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) ,__UpperCamelCase ) ) )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = 0, 0
lowerCAmelCase_ : Union[str, Any] = n - i
lowerCAmelCase_ : List[Any] = memo.get(__UpperCamelCase )
if sub_memo is not None:
lowerCAmelCase_ : str = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
lowerCAmelCase_ : str = -1
for _k in range(len(__UpperCamelCase ) - 1 ,-1 ,-1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase_ : Optional[int] = _k
break
if max_jump >= 0:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase_ : Tuple = diff + c
for j in range(min(__UpperCamelCase ,len(__UpperCamelCase ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = divmod(__UpperCamelCase ,10 )
if new_c > 0:
add(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
else:
lowerCAmelCase_ : str = []
else:
lowerCAmelCase_ : List[str] = {c: []}
lowerCAmelCase_ : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = next_term(__UpperCamelCase ,k - 1 ,i + dn ,__UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase_ , lowerCAmelCase_ : int = compute(__UpperCamelCase ,__UpperCamelCase ,i + dn ,__UpperCamelCase )
diff += _diff
dn += terms_jumped
lowerCAmelCase_ : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase_ : Dict = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase ,(diff, dn, k) )
return (diff, dn)
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ):
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase_ : int = i
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase_ : Optional[int] = ds_c + ds_b
diff += addend
lowerCAmelCase_ : Optional[int] = 0
for j in range(__UpperCamelCase ):
lowerCAmelCase_ : Union[str, Any] = a_i[j] + addend
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = divmod(__UpperCamelCase ,10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
return diff, i - start_i
def UpperCamelCase( __UpperCamelCase : str ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple ):
for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ):
lowerCAmelCase_ : Optional[Any] = digits[j] + addend
if s >= 10:
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = divmod(__UpperCamelCase ,10 )
lowerCAmelCase_ : str = addend // 10 + quotient
else:
lowerCAmelCase_ : List[Any] = s
lowerCAmelCase_ : Union[str, Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowerCAmelCase_ , lowerCAmelCase_ : int = divmod(__UpperCamelCase ,10 )
digits.append(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : int = 10**15 ):
lowerCAmelCase_ : Optional[int] = [1]
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : List[str] = 0
while True:
lowerCAmelCase_ , lowerCAmelCase_ : str = next_term(__UpperCamelCase ,20 ,i + dn ,__UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase_ : str = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
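
# A brute-force reference for small n (an illustrative sketch for
# sanity-checking the jump-table machinery above; the code computes the
# digit-sum sequence a_1 = 1, a_n = a_{n-1} + digitsum(a_{n-1})):
def brute_force_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a


# first terms: 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...
assert brute_force_solution(10) == 62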
| 171 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
            UpperCamelCase = uuid.uuid4()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
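
# Typical end-to-end usage of the pipeline above (a sketch, left commented
# out: the "conversational" task alias and the `Conversation` helper match
# this vintage of transformers but were removed in later releases, and the
# checkpoint name is an assumption):
# from transformers import pipeline, Conversation
#
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("What's a good first programming language?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])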
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__: Tuple = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
lowerCAmelCase__: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 345 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 | 0 |
import math
import qiskit
def quantum_full_adder(
    input_a: int = 1, input_b: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("""inputs must be integers.""")

    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""")

    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""")

    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""")

    # build registers
    qr = qiskit.QuantumRegister(4, """qr""")
    cr = qiskit.ClassicalRegister(2, """cr""")
    # list the entries
    entry = [input_a, input_b, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("""aer_simulator""")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
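
# A classical reference for the same truth table (an illustrative sketch):
# after the gates above, qubit 2 holds input_a XOR input_b XOR carry_in
# (the sum bit) and qubit 3 holds the majority of the three inputs
# (the carry-out bit).
def classical_full_adder(a: int, b: int, c_in: int) -> tuple[int, int]:
    total = a + b + c_in
    return total % 2, total // 2  # (sum_bit, carry_out)


assert classical_full_adder(1, 1, 1) == (1, 1)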
| 17 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
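
# A quick usage sketch for the integrator above: solve y' = y with
# y(0) = 1 on [0, 1] and compare the endpoint against e (the step size is
# an arbitrary illustrative choice).
import math

ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
print(abs(ys[-1] - math.e))  # on the order of 1e-10 for h = 0.01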
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """simple docstring"""

    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        """bwt_string""": """""".join([word[-1] for word in rotations]),
        """idx_original_string""": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or castable"""
            """ to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
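
# A worked example (sketch) with the functions above: for the classic
# "banana" input the transform is "nnbaaa" and the original rotation sits
# at index 3 of the sorted rotation list.
_demo = bwt_transform("banana")
assert _demo == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt(_demo["bwt_string"], _demo["idx_original_string"]) == "banana"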
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F"""Burrows Wheeler transform for string '{s}' results """
        F"""in '{result["bwt_string"]}'"""
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        F"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        F"""we get original string '{original_string}'"""
    )
| 523 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True})
lowerCAmelCase_ = Features({"""text""": Value("""string""")})
lowerCAmelCase_ = Features({})
lowerCAmelCase_ = "text"
@property
def UpperCAmelCase_ ( self )-> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 3 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ : Any = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowercase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 376 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
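
# A small worked example (sketch) for the stack-based version above:
# scanning [4, 5, 2, 25] right-to-left with a decreasing stack yields
# [5, 25, 25, -1].
assert next_greatest_element([4, 5, 2, 25]) == [5, 25, 25, -1]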
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        '''simple docstring'''
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        '''simple docstring'''
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        '''simple docstring'''
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        '''simple docstring'''
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        '''simple docstring'''
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
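
# A usage sketch for the automaton above (the pattern set and text are
# illustrative):
_auto = Automaton(["he", "she", "his", "hers"])
print(_auto.search_in("ahishers"))
# expected: {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}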
| 286 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""

    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))

    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] ):
snake_case__ : Optional[Any] = ""
snake_case__ : Union[str, Any] = ""
snake_case__ : List[Any] = []
snake_case__ : Any = 0
snake_case__ : Tuple = 2_5_6
snake_case__ : Optional[int] = 0
snake_case__ : Optional[Any] = 0
snake_case__ : Tuple = 0
snake_case__ : Dict = 0
def _lowercase ( self : Any , __A : Union[str, Any] ):
snake_case__ : Union[str, Any] = cva.imread(A_ , 0 )
snake_case__ : List[Any] = copy.deepcopy(self.img )
snake_case__, snake_case__, snake_case__ : Union[str, Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
snake_case__ : Optional[Any] = np.sum(A_ )
for i in range(len(A_ ) ):
snake_case__ : List[Any] = x[i] / self.k
self.sk += prk
snake_case__ : List[str] = (self.L - 1) * self.sk
if self.rem != 0:
snake_case__ : Optional[Any] = int(last % last )
snake_case__ : Tuple = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
snake_case__ : List[str] = int(np.ma.count(self.img ) / self.img[1].size )
snake_case__ : int = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case__ : List[Any] = self.img[j][i]
if num != self.last_list[num]:
snake_case__ : Union[str, Any] = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _lowercase ( self : Optional[Any] ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _lowercase ( self : Any ):
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowerCamelCase : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 297 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
lowerCAmelCase_ = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
        # Change to NCHW output format to have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = """regnet"""
lowerCAmelCase_ = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : List[str] = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
| 3 | 0 |
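# Illustrative sketch (not part of the dataset row above): the TF RegNet code
# transposes inputs because Conv2D on CPU only supports channels-last. The same
# permutations can be checked with NumPy; the shapes below are made up.
import numpy as np

x_nchw = np.zeros((2, 3, 224, 224))          # (batch, channels, height, width)
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # -> (batch, height, width, channels)
assert x_nhwc.shape == (2, 224, 224, 3)
x_back = np.transpose(x_nhwc, (0, 3, 1, 2))  # back to channels-first
assert x_back.shape == x_nchw.shape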
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_(snake_case_ , unittest.TestCase ):
"""simple docstring"""
a_ : Any = None
a_ : Dict = BloomTokenizerFast
a_ : Dict = BloomTokenizerFast
a_ : Optional[int] = True
a_ : List[str] = False
a_ : str = """tokenizer_file"""
a_ : Union[str, Any] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def _lowerCAmelCase ( self ):
super().setUp()
_lowerCamelCase : List[str] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self , **A ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
_lowerCamelCase : str = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_lowerCamelCase : Optional[int] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_lowerCamelCase : List[str] = tokenizer.batch_encode_plus(A_ )['input_ids']
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Tuple = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self , A=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_lowerCamelCase : Union[str, Any] = 'This is a simple input'
_lowerCamelCase : Any = ['This is a simple input 1', 'This is a simple input 2']
_lowerCamelCase : List[str] = ('This is a simple input', 'This is a pair')
_lowerCamelCase : Any = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(A_ , max_length=A_ )
tokenizer_r.encode_plus(A_ , max_length=A_ )
tokenizer_r.batch_encode_plus(A_ , max_length=A_ )
tokenizer_r.encode(A_ , max_length=A_ )
tokenizer_r.batch_encode_plus(A_ , max_length=A_ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
_lowerCamelCase : List[Any] = None # Hotfixing padding = None
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Simple input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
# Pair input
self.assertRaises(
A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
_lowerCamelCase : Any = load_dataset('xnli' , 'all_languages' , split='test' , streaming=A_ )
_lowerCamelCase : List[str] = next(iter(A_ ) )['premise'] # pick up one data
_lowerCamelCase : Dict = list(sample_data.values() )
_lowerCamelCase : Optional[int] = list(map(tokenizer.encode , A_ ) )
_lowerCamelCase : int = [tokenizer.decode(A_ , clean_up_tokenization_spaces=A_ ) for x in output_tokens]
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 437 |
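# Illustrative sketch of what "padding to max_length" does mechanically, and
# why the test above expects a ValueError once the pad token is removed: there
# is no id left to pad with. pad_token_id below is a hypothetical placeholder,
# not the Bloom tokenizer's actual value.
def pad_batch(id_lists, max_length, pad_token_id):
    if pad_token_id is None:
        raise ValueError("Cannot pad without a pad token")
    padded, masks = [], []
    for ids in id_lists:
        n_pad = max_length - len(ids)
        padded.append(ids + [pad_token_id] * n_pad)
        masks.append([1] * len(ids) + [0] * n_pad)
    return padded, masks

print(pad_batch([[5, 7], [9]], max_length=3, pad_token_id=0))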
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """perceiver"""
def __init__( self , A_=256 , A_=1280 , A_=768 , A_=1 , A_=26 , A_=8 , A_=8 , A_=None , A_=None , A_="kv" , A_=1 , A_=1 , A_="gelu" , A_=0.1 , A_=0.02 , A_=1e-12 , A_=True , A_=262 , A_=2048 , A_=56 , A_=[368, 496] , A_=16 , A_=1920 , A_=16 , A_=[1, 16, 224, 224] , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = num_latents
UpperCamelCase = d_latents
UpperCamelCase = d_model
UpperCamelCase = num_blocks
UpperCamelCase = num_self_attends_per_block
UpperCamelCase = num_self_attention_heads
UpperCamelCase = num_cross_attention_heads
UpperCamelCase = qk_channels
UpperCamelCase = v_channels
UpperCamelCase = cross_attention_shape_for_attention
UpperCamelCase = self_attention_widening_factor
UpperCamelCase = cross_attention_widening_factor
UpperCamelCase = hidden_act
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_query_residual
# masked language modeling attributes
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
# image classification attributes
UpperCamelCase = image_size
# flow attributes
UpperCamelCase = train_size
# multimodal autoencoding attributes
UpperCamelCase = num_frames
UpperCamelCase = audio_samples_per_frame
UpperCamelCase = samples_per_patch
UpperCamelCase = output_shape
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self )-> float:
'''simple docstring'''
return 1e-4
def UpperCAmelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , )-> Mapping[str, Any]:
'''simple docstring'''
if isinstance(A_ , A_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase = preprocessor.num_special_tokens_to_add(A_ )
UpperCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase = [' '.join(['a'] ) * seq_length] * batch_size
UpperCamelCase = dict(preprocessor(A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('input_ids' )
return inputs
elif isinstance(A_ , A_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase = compute_effective_axis_dimension(A_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCamelCase = self._generate_dummy_images(A_ , A_ , A_ , A_ )
UpperCamelCase = dict(preprocessor(images=A_ , return_tensors=A_ ) )
UpperCamelCase = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 3 | 0 |
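# Illustrative sketch of the dummy-input sizing used above: when an ONNX axis
# is dynamic (-1), fall back to a small fixed size so tracing cannot
# specialize on a real batch or sequence length. This is a simplified
# restatement, not the library's exact helper.
def effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # dynamic axis
        dimension = fixed_dimension
    return dimension - num_token_to_add

batch_size = effective_axis_dimension(-1, fixed_dimension=2)                  # -> 2
seq_length = effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)  # -> 6
print(batch_size, seq_length)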
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def _UpperCAmelCase ( UpperCamelCase: Dict ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
__lowerCAmelCase = k.replace(UpperCamelCase , UpperCamelCase )
return k
def _UpperCAmelCase ( UpperCamelCase: dict , UpperCamelCase: dict ):
"""simple docstring"""
__lowerCAmelCase = DEFAULTS.copy()
cfg_kwargs.update(UpperCamelCase )
__lowerCAmelCase = PegasusConfig(**UpperCamelCase )
__lowerCAmelCase = PegasusForConditionalGeneration(UpperCamelCase )
__lowerCAmelCase = torch_model.model.state_dict()
__lowerCAmelCase = {}
for k, v in tf_weights.items():
__lowerCAmelCase = rename_state_dict_key(UpperCamelCase )
if new_k not in sd:
raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__lowerCAmelCase = v.T
__lowerCAmelCase = torch.tensor(UpperCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__lowerCAmelCase = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
__lowerCAmelCase = mapping["shared.weight"]
__lowerCAmelCase = mapping["shared.weight"]
__lowerCAmelCase = {k: torch.zeros_like(UpperCamelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = torch_model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
__lowerCAmelCase = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], F"no matches found for the following tf keys {extra}"
return torch_model
def _UpperCAmelCase ( UpperCamelCase: Tuple="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
__lowerCAmelCase = tf.train.list_variables(UpperCamelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCamelCase , desc="converting tf checkpoint to dict" ):
__lowerCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
__lowerCAmelCase = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = array
return tf_weights
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: str ):
"""simple docstring"""
__lowerCAmelCase = Path(UpperCamelCase ).parent.name
__lowerCAmelCase = task_specific_params[F"summarization_{dataset}"]["max_position_embeddings"]
__lowerCAmelCase = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCamelCase )
# convert model
__lowerCAmelCase = get_tf_weights_as_numpy(UpperCamelCase )
__lowerCAmelCase = task_specific_params[F"summarization_{dataset}"]
if dataset == "large":
__lowerCAmelCase = task_specific_params
__lowerCAmelCase = convert_pegasus(UpperCamelCase , UpperCamelCase )
torch_model.save_pretrained(UpperCamelCase )
__lowerCAmelCase = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCamelCase , Path(UpperCamelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 611 |
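# Illustrative sketch of the pattern-based key renaming above: each TF
# checkpoint key is rewritten by applying ordered (old, new) substring
# replacements. The two patterns below are taken from the list above; note
# that order matters, since the longer pattern must fire first.
PATTERNS = [("memory_attention", "encoder_attn"), ("attention", "attn")]

def rename_state_dict_key(key, patterns=PATTERNS):
    for old, new in patterns:
        key = key.replace(old, new)
    return key

print(rename_state_dict_key("decoder/memory_attention/kernel"))
# -> decoder/encoder_attn/kernel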
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """mctct"""
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 3 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
A_ = 'us-east-1' # default region
@dataclass
class lowercase_ :
A_ = 4_2
A_ = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
A_ = {
"task_name": "mnli",
"per_device_train_batch_size": 1_6,
"per_device_eval_batch_size": 1_6,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_0_0,
"save_steps": 5_5_0_0,
}
A_ = {**hyperparameters, "max_steps": 1_0_0_0}
@property
def _lowerCAmelCase ( self : int ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _lowerCAmelCase ( self : Any ):
return F"{self.framework}-transfromers-test"
@property
def _lowerCAmelCase ( self : int ):
return F"./tests/sagemaker/scripts/{self.framework}"
@property
def _lowerCAmelCase ( self : Optional[int] ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> Dict:
snake_case__ : Tuple = SageMakerTestEnvironment(framework=request.cls.framework )
| 270 |
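# Illustrative sketch: the metric definitions above are (name, regex) pairs
# that SageMaker applies to training logs. The log line below is invented to
# show how the capture group pulls out the metric value.
import re

metric = {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}
log_line = "eval_accuracy = 0.8423"
match = re.search(metric["Regex"], log_line)
print(metric["Name"], "->", match.group(1))  # eval_accuracy -> 0.8423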
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Tuple = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Optional[int] = TaTokenizerFast
lowerCAmelCase : Any = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 3 | 0 |
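# Illustrative sketch of lazy importing in the spirit of _LazyModule: a
# module-level __getattr__ (PEP 562) resolves names on first attribute access
# on the module object, instead of importing every backend up front. This is
# a simplified stand-in, not the real implementation.
import importlib

_import_structure = {"math": ["sqrt", "pi"], "json": ["dumps"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module has no attribute {name!r}")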
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Union[str, Any] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
__UpperCamelCase :Dict = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 167 |
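# Cleaned-up, runnable restatement of the digit check above: a non-negative n
# passes when n*n ends with the digits of n (an "automorphic" number).
def is_automorphic(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76)     # 76 * 76 = 5776 ends in 76
assert not is_automorphic(7)  # 7 * 7 = 49 does not end in 7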
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 0 |
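# Illustrative sketch of the size handling exercised above: an int means a
# square image, and legacy (width, height) tuples are converted into a
# {"height", "width"} dict, hence (42, 84) becoming height=84, width=42.
# A simplified stand-in for the processor's own conversion logic.
def to_size_dict(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    width, height = size  # older configs stored (width, height)
    return {"height": height, "width": width}

assert to_size_dict(42) == {"height": 42, "width": 42}
assert to_size_dict((42, 84)) == {"height": 84, "width": 42}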
def UpperCamelCase( __UpperCamelCase : list[int] ):
lowerCAmelCase_ : List[str] = []
if len(__UpperCamelCase ) == 1:
return [nums.copy()]
for _ in range(len(__UpperCamelCase ) ):
lowerCAmelCase_ : Optional[Any] = nums.pop(0 )
lowerCAmelCase_ : List[Any] = permute(__UpperCamelCase )
for perm in permutations:
perm.append(__UpperCamelCase )
result.extend(__UpperCamelCase )
nums.append(__UpperCamelCase )
return result
def UpperCamelCase( __UpperCamelCase : str ):
def backtrack(__UpperCamelCase : str ):
if start == len(__UpperCamelCase ) - 1:
output.append(nums[:] )
else:
for i in range(__UpperCamelCase ,len(__UpperCamelCase ) ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = nums[i], nums[start]
backtrack(start + 1 )
lowerCAmelCase_ , lowerCAmelCase_ : str = nums[i], nums[start] # backtrack
lowerCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
    # print the permutations returned by the permute2-style function
    A__ : Dict = permutea([1, 2, 3])
    print(A__)
doctest.testmod()
| 171 |
'''simple docstring'''
def A_( A : list[int]):
UpperCamelCase = []
if len(A) == 1:
return [nums.copy()]
for _ in range(len(A)):
UpperCamelCase = nums.pop(0)
UpperCamelCase = permute(A)
for perm in permutations:
perm.append(A)
result.extend(A)
nums.append(A)
return result
def A_( A : str):
def backtrack(A : str):
if start == len(A) - 1:
output.append(nums[:])
else:
for i in range(A , len(A)):
UpperCamelCase , UpperCamelCase = nums[i], nums[start]
backtrack(start + 1)
UpperCamelCase , UpperCamelCase = nums[i], nums[start] # backtrack
UpperCamelCase = []
backtrack(0)
return output
if __name__ == "__main__":
import doctest
    # print the permutations returned by the permute2-style function
    lowerCAmelCase : Dict = permutea([1, 2, 3])
    print(lowerCAmelCase)
doctest.testmod()
| 3 | 0 |
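# Cleaned-up, runnable version of the swap-based backtracking above: fix one
# position at a time, recurse on the rest, then undo the swap so sibling
# branches see the original order again.
def permute(nums):
    output = []

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[start], nums[i] = nums[i], nums[start]
            backtrack(start + 1)
            nums[start], nums[i] = nums[i], nums[start]  # undo the swap

    backtrack(0)
    return output

print(permute([1, 2, 3]))  # all 6 orderings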
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
stooge(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) - 1 )
return arr
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
SCREAMING_SNAKE_CASE_ : List[str] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE , i + t , (SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (h - t) )
if __name__ == "__main__":
lowerCAmelCase__: Any = input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase__: Optional[int] = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 345 |
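# Cleaned-up, runnable restatement of stooge sort above: swap the ends if out
# of order, then recursively sort the first 2/3, the last 2/3, and the first
# 2/3 again. Correct but deliberately slow (about O(n^2.71)).
def stooge_sort(arr):
    def stooge(i, h):
        if i >= h:
            return
        if arr[i] > arr[h]:
            arr[i], arr[h] = arr[h], arr[i]
        if h - i + 1 > 2:
            t = (h - i + 1) // 3
            stooge(i, h - t)   # first 2/3
            stooge(i + t, h)   # last 2/3
            stooge(i, h - t)   # first 2/3 again
    stooge(0, len(arr) - 1)
    return arr

print(stooge_sort([2, 4, 5, 3, 1]))  # [1, 2, 3, 4, 5]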
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def A_( A : float , A : float , A : int):
UpperCamelCase = x
UpperCamelCase = y
for step in range(A): # noqa: B007
UpperCamelCase = a * a - b * b + x
UpperCamelCase = 2 * a * b + y
UpperCamelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def A_( A : float):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def A_( A : float):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(A , 1 , 1))
def A_( A : int = 800 , A : int = 600 , A : float = -0.6 , A : float = 0 , A : float = 3.2 , A : int = 50 , A : bool = True , ):
UpperCamelCase = Image.new('RGB' , (image_width, image_height))
UpperCamelCase = img.load()
# loop through the image-coordinates
for image_x in range(A):
for image_y in range(A):
# determine the figure-coordinates based on the image-coordinates
UpperCamelCase = figure_width / image_width * image_height
UpperCamelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCamelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCamelCase = get_distance(A , A , A)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCamelCase = get_color_coded_rgb(A)
else:
UpperCamelCase = get_black_and_white_rgb(A)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase : Any = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 3 | 0 |
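# Minimal restatement of the escape-time iteration above for a single point:
# iterate z -> z^2 + c and report how much of the iteration budget was used
# before |z|^2 exceeded 4. Points inside the set never escape and return 1.0.
def escape_step(x, y, max_step=50):
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

print(escape_step(0.0, 0.0))  # inside the set -> 1.0
print(escape_step(2.0, 2.0))  # diverges immediately -> 0.0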
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> int:
__A : Optional[Any] = [0] * len(a__ )
for i in range(1 ,len(a__ ) ):
# use last results for better performance - dynamic programming
__A : Dict = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__A : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__A : Union[str, Any] = j
return prefix_result
def __SCREAMING_SNAKE_CASE ( a__ : str ) -> int:
return max(prefix_function(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
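# Usage sketch for the prefix function above: pi[i] is the length of the
# longest proper prefix of s[: i + 1] that is also its suffix, computed in
# O(n) by reusing earlier results (the core of KMP string matching).
def prefix_function(s):
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

print(prefix_function("aabcdaabc"))       # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(max(prefix_function("aabcdaabc")))  # longest prefix-suffix: 4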
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a ( snake_case_ ):
"""simple docstring"""
__lowerCAmelCase = """yolos"""
def __init__( self , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1e-1_2 , snake_case_=[512, 864] , snake_case_=16 , snake_case_=3 , snake_case_=True , snake_case_=100 , snake_case_=True , snake_case_=False , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=5 , snake_case_=2 , snake_case_=0.1 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**A_ )
__UpperCAmelCase: List[str] = hidden_size
__UpperCAmelCase: Tuple = num_hidden_layers
__UpperCAmelCase: List[Any] = num_attention_heads
__UpperCAmelCase: Optional[int] = intermediate_size
__UpperCAmelCase: List[Any] = hidden_act
__UpperCAmelCase: str = hidden_dropout_prob
__UpperCAmelCase: List[Any] = attention_probs_dropout_prob
__UpperCAmelCase: int = initializer_range
__UpperCAmelCase: Optional[Any] = layer_norm_eps
__UpperCAmelCase: int = image_size
__UpperCAmelCase: str = patch_size
__UpperCAmelCase: str = num_channels
__UpperCAmelCase: List[str] = qkv_bias
__UpperCAmelCase: Dict = num_detection_tokens
__UpperCAmelCase: List[str] = use_mid_position_embeddings
__UpperCAmelCase: Optional[Any] = auxiliary_loss
# Hungarian matcher
__UpperCAmelCase: str = class_cost
__UpperCAmelCase: List[str] = bbox_cost
__UpperCAmelCase: Tuple = giou_cost
# Loss coefficients
__UpperCAmelCase: Any = bbox_loss_coefficient
__UpperCAmelCase: List[Any] = giou_loss_coefficient
__UpperCAmelCase: Any = eos_coefficient
class a ( snake_case_ ):
"""simple docstring"""
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowercase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowercase_ ( self ):
'''simple docstring'''
return 1e-4
@property
def lowercase_ ( self ):
'''simple docstring'''
        return 12
| 523 |
'''simple docstring'''
lowerCAmelCase : Optional[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def A_( A : dict , A : str , A : Optional[Any]):
UpperCamelCase = set()
# keep track of all the paths to be checked
UpperCamelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCamelCase = queue.pop(0)
# get the last node from the path
UpperCamelCase = path[-1]
if node not in explored:
UpperCamelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCamelCase = list(A)
new_path.append(A)
queue.append(A)
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A)
# in case there's no path between the 2 nodes
return []
def A_( A : dict , A : str , A : Tuple):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCamelCase = [start]
UpperCamelCase = set(A)
# Keep tab on distances from `start` node.
UpperCamelCase = {start: 0, target: -1}
while queue:
UpperCamelCase = queue.pop(0)
if node == target:
UpperCamelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A)
queue.append(A)
UpperCamelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3 | 0 |
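# Cleaned-up, runnable restatement of the BFS path search above, using a
# deque so pops from the front are O(1) instead of list.pop(0)'s O(n).
from collections import deque

def bfs_shortest_path(graph, start, goal):
    if start == goal:
        return [start]
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour in explored:
                continue
            if neighbour == goal:
                return path + [neighbour]
            explored.add(neighbour)
            queue.append(path + [neighbour])
    return []  # no path between the two nodes

demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
print(bfs_shortest_path(demo, "A", "D"))  # ['A', 'B', 'D']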
from typing import Any
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if not input_list:
return []
    snake_case_ = [input_list.count(value ) for value in input_list]
snake_case_ = max(_A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_A ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 |
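# Usage sketch for the mode function above: every value tied for the highest
# count is returned, sorted, since a multiset can have several modes.
def modes(values):
    if not values:
        return []
    counts = [values.count(v) for v in values]
    top = max(counts)
    return sorted({values[i] for i, c in enumerate(counts) if c == top})

print(modes([2, 3, 3, 5, 5]))  # [3, 5]
print(modes([1, 1, 2]))        # [1]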
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
def __init__( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = ''
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 256
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
UpperCamelCase = cva.imread(A_ , 0 )
UpperCamelCase = copy.deepcopy(self.img )
UpperCamelCase , UpperCamelCase , UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCamelCase = np.sum(A_ )
for i in range(len(A_ ) ):
UpperCamelCase = x[i] / self.k
self.sk += prk
UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCamelCase = int(last % last )
UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
lowerCAmelCase : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 3 | 0 |
'''simple docstring'''
def a_ ( _UpperCAmelCase : int ) -> Dict:
__snake_case , __snake_case : Optional[int] = [], []
while len(_UpperCAmelCase ) > 1:
__snake_case , __snake_case : Dict = min(_UpperCAmelCase ), max(_UpperCAmelCase )
start.append(_UpperCAmelCase )
end.append(_UpperCAmelCase )
collection.remove(_UpperCAmelCase )
collection.remove(_UpperCAmelCase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
A__ : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
A__ : Dict = [int(item) for item in user_input.split(''',''')]
    print(*a_(unsorted), sep=''',''')
| 286 |
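# Cleaned-up, runnable restatement of the sort above (despite the merge_sort
# name in the driver, it is a min/max extraction sort): repeatedly move the
# smallest value to a front list and the largest to a back list. Note that it
# mutates its input.
def minmax_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        low, high = min(collection), max(collection)
        start.append(low)
        end.append(high)
        collection.remove(low)
        collection.remove(high)
    end.reverse()
    return start + collection + end

print(minmax_sort([3, 1, 4, 1, 5]))  # [1, 1, 3, 4, 5]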
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """unispeech-sat"""
def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1e-5 , A_="group" , A_="gelu" , A_=(512, 512, 512, 512, 512, 512, 512) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=128 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=320 , A_=2 , A_=0.1 , A_=100 , A_=256 , A_=256 , A_=0.1 , A_="mean" , A_=False , A_=False , A_=256 , A_=(512, 512, 512, 512, 1500) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=512 , A_=0 , A_=1 , A_=2 , A_=504 , **A_ , )-> Tuple:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 3 | 0 |
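# Illustrative sketch of the config validation above: the convolutional
# hyperparameter lists must agree in length, since each feature-extractor
# layer needs one (dim, stride, kernel) triple. A simplified stand-in for the
# check the config class performs.
def check_conv_config(conv_dim, conv_stride, conv_kernel):
    if not (len(conv_dim) == len(conv_stride) == len(conv_kernel)):
        raise ValueError(
            "Expected one (dim, stride, kernel) triple per layer, got "
            f"{len(conv_dim)} dims, {len(conv_stride)} strides, "
            f"{len(conv_kernel)} kernels."
        )

check_conv_config((512, 512), (5, 2), (10, 3))  # ok; raises otherwise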
import numpy as np
__lowerCamelCase : Union[str, Any] = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Any ):
snake_case__ : List[str] = np.array(A_ )
def _lowercase ( self : Any , __A : List[Any] ):
snake_case__, snake_case__ : Any = np.where(letter == self.SQUARE )
snake_case__ : Tuple = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _lowercase ( self : Tuple , __A : Any , __A : Tuple ):
snake_case__ : Optional[int] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _lowercase ( self : List[Any] , __A : Optional[int] ):
snake_case__ : Optional[Any] = message.lower()
snake_case__ : Any = message.replace(" " , "" )
snake_case__ : Dict = message.replace("j" , "i" )
snake_case__ : List[str] = np.empty((2, len(A_ )) )
for letter_index in range(len(A_ ) ):
snake_case__ : int = self.letter_to_numbers(message[letter_index] )
snake_case__ : Tuple = numbers[0]
snake_case__ : Optional[Any] = numbers[1]
snake_case__ : Any = first_step.reshape(2 * len(A_ ) )
snake_case__ : List[Any] = ""
for numbers_index in range(len(A_ ) ):
snake_case__ : Dict = int(second_step[numbers_index * 2] )
snake_case__ : int = int(second_step[(numbers_index * 2) + 1] )
snake_case__ : List[Any] = self.numbers_to_letter(A_ , A_ )
snake_case__ : List[Any] = encoded_message + letter
return encoded_message
def _lowercase ( self : Tuple , __A : Dict ):
snake_case__ : Any = message.lower()
        snake_case__ : Any = message.replace(" " , "" )
snake_case__ : Tuple = np.empty(2 * len(A_ ) )
for letter_index in range(len(A_ ) ):
snake_case__ : List[Any] = self.letter_to_numbers(message[letter_index] )
snake_case__ : List[str] = numbers[0]
snake_case__ : List[str] = numbers[1]
snake_case__ : Optional[int] = first_step.reshape((2, len(A_ )) )
snake_case__ : Union[str, Any] = ""
for numbers_index in range(len(A_ ) ):
snake_case__ : Optional[Any] = int(second_step[0, numbers_index] )
snake_case__ : Tuple = int(second_step[1, numbers_index] )
snake_case__ : Dict = self.numbers_to_letter(A_ , A_ )
snake_case__ : Optional[Any] = decoded_message + letter
return decoded_message
| 297 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_=100 , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=[0, 1, 2, 3] , )-> Any:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = 100
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = out_indices
UpperCamelCase = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = BeitModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = BeitForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.num_labels
UpperCamelCase = BeitForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase = False
UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img( ):  # name restored to match the prepare_img() calls in the tests below
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).pixel_values.to(A_ )
# prepare bool_masked_pos
UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 21841) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits
# verify the logits
UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , A_ )
UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
UpperCamelCase = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=A_ , )
else:
UpperCamelCase = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
UpperCamelCase = model.to(A_ )
UpperCamelCase = BeitImageProcessor(do_resize=A_ , size=640 , do_center_crop=A_ )
UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
UpperCamelCase = Image.open(ds[0]['file'] )
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(500, 300)] )
UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , A_ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , A_ )
| 3 | 0 |
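# The integration tests above exercise public BEiT checkpoints end to end. A
# minimal inference sketch along the same lines (classes and checkpoint names
# are taken from the tests; the image path is a placeholder):
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')

image = Image.open('cats.png')  # placeholder path
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for this checkpoint
print(model.config.id2label[logits.argmax(-1).item()])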
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 437 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """generated"""
def __init__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = {}
if truncation is not None:
UpperCamelCase = truncation
UpperCamelCase = generate_kwargs
UpperCamelCase = {}
if return_tensors is not None and return_type is None:
UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase = self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Optional[int]:
'''simple docstring'''
return True
def UpperCAmelCase_ ( self , *A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
UpperCamelCase = ([prefix + arg for arg in args[0]],)
UpperCamelCase = True
elif isinstance(args[0] , A_ ):
UpperCamelCase = (prefix + args[0],)
UpperCamelCase = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`.''' )
UpperCamelCase = self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase_ ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def UpperCAmelCase_ ( self , A_ , **A_ )-> int:
'''simple docstring'''
if self.framework == "pt":
UpperCamelCase , UpperCamelCase = model_inputs['input_ids'].shape
elif self.framework == "tf":
UpperCamelCase , UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy()
UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length )
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
UpperCamelCase = self.model.generate(**A_ , **A_ )
UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCamelCase = output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCamelCase = tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase_ ( self , A_ , A_=ReturnType.TEXT , A_=False )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCamelCase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
UpperCamelCase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """summary"""
def __call__( self , *A_ , **A_ )-> Optional[int]:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """translation"""
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCAmelCase_ ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None )-> Dict:
'''simple docstring'''
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , **A_ )-> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = super()._sanitize_parameters(**A_ )
if src_lang is not None:
UpperCamelCase = src_lang
if tgt_lang is not None:
UpperCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCamelCase = kwargs.get('task' , self.task )
UpperCamelCase = task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
UpperCamelCase = items[1]
UpperCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
return super().__call__(*A_ , **A_ )
| 3 | 0 |
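# The three pipeline classes above back the "text2text-generation",
# "summarization" and "translation" tasks. A hedged usage sketch (the model
# names are illustrative; any seq2seq checkpoint works):
from transformers import pipeline

summarizer = pipeline('summarization', model='sshleifer/distilbart-cnn-12-6')
print(summarizer('A long article to condense ...', min_length=5, max_length=40))

translator = pipeline('translation_en_to_fr', model='t5-small')
print(translator('How old are you?'))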
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__lowerCAmelCase = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
__lowerCAmelCase = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(A_ )
from datasets import load_dataset
__lowerCAmelCase = load_dataset("nielsr/rvlcdip-demo" )
__lowerCAmelCase = dataset["train"][0]["image"].convert("RGB" )
__lowerCAmelCase = image_processor(A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**A_ )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = torch.Size((1, 16) )
self.assertEqual(logits.shape , A_ )
__lowerCAmelCase = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=A_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
| 611 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : Dict = ''
lowerCAmelCase : Dict = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 3 | 0 |
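# The tests above verify that kwargs handlers forward their fields into the
# underlying PyTorch objects. A minimal sketch of the same pattern (values are
# illustrative; fp16 requires a CUDA device):
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs

scaler_kwargs = GradScalerKwargs(init_scale=1024, growth_factor=2)
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_kwargs, ddp_kwargs])
# accelerator.scaler is now built with init_scale=1024 and growth_factor=2;
# models prepared under multi-GPU will wrap DDP with find_unused_parameters=True.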
'''simple docstring'''
def triangle_number_generator():
    # yields the triangular numbers n * (n + 1) / 2
    for n in range(1 , 100_0000 ):
        yield n * (n + 1) // 2
def count_divisors(n ) -> int:
    # count divisors from the prime factorisation: the product of
    # (multiplicity + 1) over all prime factors
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution() -> int:
    # Project Euler 12: the first triangular number with more than 500 divisors
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 270 |
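# Worked example for the divisor count above: 28 = 2**2 * 7, so the formula
# gives (2 + 1) * (1 + 1) = 6 divisors, namely {1, 2, 4, 7, 14, 28}; 28 is the
# first triangular number with more than five divisors. A self-contained
# brute-force cross-check:
assert sum(1 for d in range(1, 29) if 28 % d == 0) == 6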
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self , A_ , A_ = None , A_ = None )-> Tuple:
'''simple docstring'''
super().__init__()
UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase = torch.zeros(A_ , A_ )
else:
UpperCamelCase = None
UpperCamelCase = torch.nn.Parameter(A_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
else:
UpperCamelCase = [''] * batch_size
UpperCamelCase = text_input_ids.shape[-1]
UpperCamelCase = self.tokenizer(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase = negative_prompt_embeds.shape[1]
UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = 1
elif isinstance(A_ , A_ ):
UpperCamelCase = len(A_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
UpperCamelCase = batch_size * num_images_per_prompt
UpperCamelCase = guidance_scale > 1.0
UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A_ )}.''' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase = self.transformer.num_vector_embeds - 1
UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ , device=self.device )
UpperCamelCase = self.scheduler.timesteps.to(self.device )
UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
UpperCamelCase = self.truncate(A_ , A_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase = self.vqvae.config.vq_embed_dim
UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def UpperCAmelCase_ ( self , A_ , A_ )-> torch.FloatTensor:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
UpperCamelCase = torch.exp(A_ )
UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCamelCase = keep_mask[:, :-1, :]
UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCamelCase = log_p_x_0.clone()
UpperCamelCase = -torch.inf # -inf = log(0)
return rv
| 3 | 0 |
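# Hedged usage sketch for the pipeline above (the checkpoint name is the
# public VQ-Diffusion release; generation settings mirror the defaults):
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
image = pipe('teddy bear playing in the pool', num_inference_steps=100).images[0]
image.save('teddy_bear.png')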
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowercase = logging.get_logger(__name__)
__lowercase = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class lowerCamelCase_ ( snake_case_ ):
'''simple docstring'''
a__ : Any = """imagegpt"""
a__ : Dict = ["""past_key_values"""]
a__ : Dict = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __lowercase=512 + 1 , __lowercase=32 * 32 , __lowercase=512 , __lowercase=24 , __lowercase=8 , __lowercase=None , __lowercase="quick_gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1E-5 , __lowercase=0.02 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , **__lowercase , ) -> str:
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :Dict = n_positions
__UpperCamelCase :Any = n_embd
__UpperCamelCase :Tuple = n_layer
__UpperCamelCase :Union[str, Any] = n_head
__UpperCamelCase :List[str] = n_inner
__UpperCamelCase :List[str] = activation_function
__UpperCamelCase :Dict = resid_pdrop
__UpperCamelCase :str = embd_pdrop
__UpperCamelCase :Tuple = attn_pdrop
__UpperCamelCase :Union[str, Any] = layer_norm_epsilon
__UpperCamelCase :Union[str, Any] = initializer_range
__UpperCamelCase :Any = scale_attn_weights
__UpperCamelCase :Tuple = use_cache
__UpperCamelCase :Any = scale_attn_by_inverse_layer_idx
__UpperCamelCase :Tuple = reorder_and_upcast_attn
__UpperCamelCase :Tuple = tie_word_embeddings
super().__init__(tie_word_embeddings=A_ , **A_)
class lowerCamelCase_ ( snake_case_ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
])
def UpperCamelCase__ ( self , __lowercase , __lowercase = 1 , __lowercase = -1 , __lowercase = False , __lowercase = None , __lowercase = 3 , __lowercase = 32 , __lowercase = 32 , ) -> Mapping[str, Any]:
__UpperCamelCase :Dict = self._generate_dummy_images(A_ , A_ , A_ , A_)
__UpperCamelCase :Union[str, Any] = dict(preprocessor(images=A_ , return_tensors=A_))
return inputs
| 167 |
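# A minimal sketch instantiating the config above with its defaults and using
# the attribute_map aliases (expected values follow from the __init__ defaults):
from transformers import ImageGPTConfig

config = ImageGPTConfig()
print(config.vocab_size)         # 513 -- 512 pixel clusters + 1 start-of-sequence token
print(config.hidden_size)        # 512 -- alias of n_embd via attribute_map
print(config.num_hidden_layers)  # 24  -- alias of n_layer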
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
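# The file above follows the standard transformers lazy-import layout: heavy
# submodules are only imported when a symbol is first accessed. A stripped-down
# illustration of the idea (not the real _LazyModule implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their symbols is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module(f'{self.__name__}.{self._symbol_to_module[attr]}')
        return getattr(submodule, attr)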
A__ : Any = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 171 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 3 | 0 |
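# Hedged usage sketch for the conversational pipeline above (the model name is
# illustrative; any chat-capable checkpoint works):
from transformers import Conversation, pipeline

chatbot = pipeline('conversational', model='microsoft/DialoGPT-medium')
conversation = Conversation('What is the best way to learn Python?')
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input('Any good books?')
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])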
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__: Any = logging.get_logger(__name__)
lowerCAmelCase__: Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
lowerCAmelCase__: str = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__: int = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    # Map every byte to a printable unicode character so that byte-level BPE
    # can operate on strings without whitespace or control-character surprises.
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ) -> set:
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class snake_case_ ( snake_case_ ):
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any = ['input_ids', 'attention_mask']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="replace" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=False , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : str = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
SCREAMING_SNAKE_CASE_ : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : str = json.load(A_ )
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : int = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(A_ , range(len(A_ ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , __lowerCAmelCase ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tuple(A_ )
SCREAMING_SNAKE_CASE_ : Any = get_pairs(A_ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : List[Any] = min(A_ , key=lambda __lowerCAmelCase : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = bigram
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Tuple = 0
while i < len(A_ ):
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : Tuple = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(A_ )
SCREAMING_SNAKE_CASE_ : Dict = new_word
if len(A_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : int = get_pairs(A_ )
SCREAMING_SNAKE_CASE_ : Tuple = ' '.join(A_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = word
return word
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = []
for token in re.findall(self.pat , A_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def __A ( self , __lowerCAmelCase ):
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __A ( self , __lowerCAmelCase ):
return self.decoder.get(A_ )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(A_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : int = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
SCREAMING_SNAKE_CASE_ : Any = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Optional[int] = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ' ' + text
return (text, kwargs)
| 345 |
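# Hedged sketch exercising the byte-level BPE tokenizer above through the
# public API (checkpoint name taken from the vocab map in this file; the shown
# ids assume the stock RoBERTa-style vocabulary):
from transformers import LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
ids = tokenizer('Hello world')['input_ids']
print(ids)                    # e.g. [0, 31414, 232, 2]
print(tokenizer.decode(ids))  # "<s>Hello world</s>"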
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
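# A hedged helper sketch (illustrative, not from the original script): the
# '.eZt8xd' selector above is scraped from Google's rendered markup and is
# likely to change, so keeping it as a parameter makes updates painless.
def result_links(html, selector='.eZt8xd', limit=5):
    soup = BeautifulSoup(html, 'html.parser')
    return [(a.text, a.get('href')) for a in soup.select(selector)[:limit]]
# e.g. result_links(res.text) -> [(link_text, href), ...]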
| 3 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )
class MobileViTVaModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.2_5 , ffn_dropout=0.0 , attn_dropout=0.0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileViTVaModel,
            '''image-classification''': MobileViTVaForImageClassification,
            '''image-segmentation''': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase_ ( self : Tuple ):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Optional[Any]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
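if __name__ == "__main__":
    # Usage sketch mirroring the integration tests above: the same
    # processor -> model -> logits flow outside the test harness. Checkpoint
    # and fixture path come from the tests; the public transformers class
    # names (MobileViTV2...) are used instead of this file's local aliases.
    import torch
    from PIL import Image
    from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
    print(model.config.id2label[int(logits.argmax(-1))])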
| 17 |
'''simple docstring'''
import numpy as np
def runge_kutta(f , ya , xa , h , x_end):
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x , y[k])
        k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2)
        k4 = f(x + h , y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
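    # Quick sanity check for the routine above: integrating y' = y from
    # y(0) = 1 to x = 1 with step h = 0.1 should agree with e to roughly 1e-6.
    # (Signature as reconstructed above: runge_kutta(f, ya, xa, h, x_end).)
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(approx[-1], np.e)  # ~2.7182797 vs 2.7182818...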
| 3 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) -> Optional[int]:
set_seed(3 )
# generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1_0_2_6 , trim=trim )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    # load pretrained model
    model = load_gpta("""gpt2""" ).to(device )
    print("""computing perplexity on objective set""" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("""perplexity on objective set:""" , orig_perp )
# collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , ) -> Optional[int]:
set_seed(4_2 )
# Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=1_0_0 , igf_model_path=igf_model_path , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
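# Illustrative sketch (hypothetical helper, not part of the original script)
# of the selection rule finetune() applies below: the secondary learner scores
# each candidate context, the acceptance threshold starts permissive and drops
# to -1 after a short warmup, and only contexts whose predicted information
# gain clears the current threshold are kept for backprop.
def keep_context(predicted_q, global_step, initial_threshold=1.0, warmup_steps=10):
    threshold = initial_threshold if global_step < warmup_steps else -1.0
    return predicted_q >= threshold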
def finetune( model , train_dataset , test_dataset , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , ) -> List[Any]:
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
model.train()
if secondary_learner is not None:
secondary_learner.to(_lowercase )
secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("""Test perplexity, step""" , global_step , """:""" , real_perp )
for epoch in range(int(_lowercase ) ):
for step, example in enumerate(_lowercase ):
torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 1_0:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
                examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("""Test perplexity, step""" , global_step , """:""" , real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , _lowercase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main() -> Tuple:
    parser = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=_lowercase , default=_lowercase , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=_lowercase , default=_lowercase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=_lowercase , type=_lowercase , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=_lowercase , default=_lowercase , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=3_2 , type=_lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=1_0_0 , type=_lowercase , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=1_0_0 , type=_lowercase , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_0_0_0 , type=_lowercase , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=1_2_8 , type=_lowercase , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=1_6 , type=_lowercase , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=1_0 , type=_lowercase , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=1_0_0 , type=_lowercase , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_0_2_6 , type=_lowercase , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=1_5 , type=_lowercase , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=_lowercase , type=_lowercase , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=_lowercase , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_lowercase , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=_lowercase , type=_lowercase , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=True , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("""data/IGF_values.jbl""" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="""igf_model.pt""" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    set_seed(4_2 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=3_2 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_0_0 , min_len=1_0_2_6 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=1_0 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
    main()
| 523 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
    @property
    def column_mapping( self )-> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
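# Usage sketch (assuming the reconstructed class name above): the frozen
# dataclass maps whichever dataset column holds the raw text onto the
# canonical "text" feature expected for language modeling.
# task = LanguageModeling(text_column="content")
# assert task.column_mapping == {"content": "text"}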
| 3 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    '''simple docstring'''
    width, height = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(width ):
        for j in range(height ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
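# The nested loops above invert one pixel at a time, which is slow in Python;
# for 8-bit images the same negative is a single vectorized NumPy expression.
# (Sketch only; assumes a uint8 array as returned by imread.)
import numpy as np

def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img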
| 376 |
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1 , arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
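    # Worked example for the stack-based variant: scanning right to left, every
    # element is pushed and popped at most once, so the whole pass is O(n):
    # next_greatest_element([2, 7, 3, 5, 1]) -> [7, -1, 5, -1, -1]
    print(next_greatest_element([2, 7, 3, 5, 1]))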
| 3 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig ):
    model_type = '''realm'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
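# Usage sketch (not part of the original module): the config can be built with
# the defaults above or pulled from one of the checkpoints in the archive map.
# from transformers import RealmConfig
# config = RealmConfig()                                  # hidden_size=768, num_candidates=8
# config = RealmConfig.from_pretrained("google/realm-orqa-nq-openqa")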
| 286 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase))
    return lower_to_upper.get(sentence[0] , sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
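    # Quick checks for the function above: only a leading lowercase letter is
    # mapped; non-letters and empty input pass through unchanged.
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 hello") == "123 hello"
    assert capitalize("") == ""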
| 3 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNetaDConditionModelIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ):
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
    def tearDown( self ):
super().tearDown()
gc.collect()
    def get_latents( self , seed=0 , shape=(4, 4, 6_4, 6_4) , fpaa=False ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fpaa=False , model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        revision = "bf16" if fpaa else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed=0 , shape=(4, 7_7, 7_6_8) , fpaa=False ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
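# Standalone sketch of the @parameterized.expand pattern used above: each row
# of test data becomes its own generated test case (test_square_0, _1, ...).
import unittest
from parameterized import parameterized

class SquareTests(unittest.TestCase):
    @parameterized.expand([(2, 4), (3, 9), (4, 16)])
    def test_square(self, n, expected):
        self.assertEqual(n * n, expected)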
| 297 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__( self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs , )-> str:
        '''simple docstring'''
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.ConvaD(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding='VALID' , groups=groups , use_bias=False , name='convolution' , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
        self.activation = ACTaFN[activation] if activation is not None else tf.identity
    def call( self , hidden_state )-> Any:
        '''simple docstring'''
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__( self , config , **kwargs )-> Optional[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
    def call( self , pixel_values )-> List[Any]:
        '''simple docstring'''
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = """regnet"""
    main_input_name = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
REGNET_START_DOCSTRING = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel ):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
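# The layout shuffling done throughout this file, in isolation: Keras conv
# layers want channels-last (NHWC) on CPU, while the public API keeps
# PyTorch's channels-first (NCHW); the two permutations are exact inverses.
import tensorflow as tf

x_nchw = tf.random.normal((1, 3, 224, 224))
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))  # -> (1, 224, 224, 3)
x_back = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # -> (1, 3, 224, 224)
assert x_back.shape == x_nchw.shape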
| 3 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(a_ , unk_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '[PAD]' )
        self.assertEqual(len(vocab_keys ) , 3_0001 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[Any] = ' \tHeLLo!how \n Are yoU? '
_lowerCamelCase : List[str] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_lowerCamelCase : Optional[int] = DebertaVaTokenizer(A_ , do_lower_case=A_ )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Tuple = DebertaVaTokenizerFast(A_ , do_lower_case=A_ )
_lowerCamelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _lowerCAmelCase ( self ):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = 'I was born in 92000, and this is falsé.'
_lowerCamelCase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : Optional[int] = DebertaVaTokenizer(A_ , split_by_punct=A_ )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : List[str] = DebertaVaTokenizerFast(A_ , split_by_punct=A_ )
_lowerCamelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = 'I was born in 92000, and this is falsé.'
_lowerCamelCase : int = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : Optional[int] = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : List[str] = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = 'I was born in 92000, and this is falsé.'
_lowerCamelCase : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowerCamelCase : Dict = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Union[str, Any] = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[str] = 'I was born in 92000, and this is falsé.'
_lowerCamelCase : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowerCamelCase : str = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Union[str, Any] = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
_lowerCamelCase : int = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_lowerCamelCase : int = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Union[str, Any] = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_lowerCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCamelCase : Dict = 'I was born in 92000, and this is falsé.'
_lowerCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
_lowerCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_lowerCamelCase : List[Any] = self.get_rust_tokenizer()
_lowerCamelCase : int = tokenizer.encode(A_ )
_lowerCamelCase : Optional[Any] = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
    def test_full_tokenizer(self):
        sequence = 'This is a test'
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        back_tokens_target = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = 'I was born in 92000, and this is falsé.'
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        back_tokens_target = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCamelCase : Dict = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,  # the expected-encoding dict defined above
            model_name='microsoft/deberta-v2-xlarge',
            revision='ad6e42c1532ddf3a15c39246b63f5559d558b670',
        )
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = 'perceiver'

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention='kv', self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act='gelu', attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
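

# Minimal usage sketch (illustrative; the attribute values shown are the
# defaults defined in PerceiverConfig.__init__ above):
if __name__ == "__main__":
    config = PerceiverConfig(num_latents=128, d_latents=512)
    print(config.num_latents, config.d_latents, config.hidden_act)  # -> 128 512 gelu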
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'},
    )
    inference: bool = field(
        default=True,
        metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'},
    )
    cuda: bool = field(
        default=True,
        metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'},
    )
    tpu: bool = field(
        default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'}
    )
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(
        default=True,
        metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'},
    )
    memory: bool = field(
        default=True,
        metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f'inference_time_{round(time() )}.csv',
        metadata={'help': 'CSV filename used if saving time results to csv.'},
    )
    inference_memory_csv_file: str = field(
        default=f'inference_memory_{round(time() )}.csv',
        metadata={'help': 'CSV filename used if saving memory results to csv.'},
    )
    train_time_csv_file: str = field(
        default=f'train_time_{round(time() )}.csv',
        metadata={'help': 'CSV filename used if saving time results to csv for training.'},
    )
    train_memory_csv_file: str = field(
        default=f'train_memory_{round(time() )}.csv',
        metadata={'help': 'CSV filename used if saving memory results to csv for training.'},
    )
    env_info_csv_file: str = field(
        default=f'env_info_{round(time() )}.csv',
        metadata={'help': 'CSV filename used if saving environment information.'},
    )
    log_filename: str = field(
        default=f'log_{round(time() )}.csv',
        metadata={'help': 'Log filename used if print statements are saved in log.'},
    )
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`.")
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
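

# Minimal usage sketch (illustrative; the model identifier is a placeholder).
# In practice this dataclass is usually filled from the CLI via HfArgumentParser.
if __name__ == "__main__":
    args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
    print(args.model_names)
    print(args.to_json_string())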
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = 'mctct'

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act='relu', initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction='sum', ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
                f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
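

# Minimal usage sketch (illustrative): default construction, plus a deliberately
# inconsistent conv setup that triggers the validation above.
if __name__ == "__main__":
    config = MCTCTConfig()
    print(config.hidden_size, config.num_conv_layers)  # -> 1536 1
    try:
        MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)
    except ValueError as err:
        print("validation works:", err)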
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
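

# Minimal usage sketch (illustrative): what the helpers under test return.
if __name__ == "__main__":
    print(find_backend("    if not is_torch_available():"))  # -> torch
    print(create_dummy_object("CONSTANT", "'torch'"))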
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
        module_spec=__spec__,
    )
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, 'rb') as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith('version'):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.')
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """)

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''')
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''')
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    pt_model.base_model_prefix = ''

    flax_state_dict = flatten_dict(flax_state, sep='.')
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.')
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__UpperCamelCase :Tuple = flax_key_tuple_array[:-1] + ['''weight''']
__UpperCamelCase :Any = jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__UpperCamelCase :int = flax_key_tuple_array[:-1] + ['''weight''']
__UpperCamelCase :Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__UpperCamelCase :int = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :int = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__UpperCamelCase :List[Any] = '''.'''.join(SCREAMING_SNAKE_CASE )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
return pt_model
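

# Minimal usage sketch (illustrative; the model class and checkpoint path are
# placeholders, not part of this module):
#
#   pt_model = UNet2DModel(...)  # a freshly initialized PyTorch model
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/diffusion_flax_model.msgpack")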
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('''Invalid entry - please enter a number.''')
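

# Quick sanity check (illustrative): solution(n) is the central binomial
# coefficient C(2n, n), e.g. C(2, 1) = 2 and C(8, 4) = 70.
if __name__ == "__main__":
    assert solution(1) == 2
    assert solution(4) == 70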
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
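

# Quick check (illustrative): the two implementations agree up to ordering.
if __name__ == "__main__":
    assert sorted(permute([1, 2])) == sorted(permute2([1, 2])) == [[1, 2], [2, 1]]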
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word."""
    if not sentence:
        return ''
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
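

# Quick check (illustrative): only the first character is mapped, and
# non-alphabetic leading characters pass through unchanged.
if __name__ == "__main__":
    assert capitalize('hello world') == 'Hello world'
    assert capitalize('123 hello') == '123 hello'
    assert capitalize('') == ''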
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
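

# Quick check (illustrative): a point inside the Mandelbrot set never diverges,
# so get_distance returns 1 and the pixel is rendered black.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0
    assert get_color_coded_rgb(1.0) == (0, 0, 0)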
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
elif (
re.search(
rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" ,a__ ,)
is not None
):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__A : Tuple = ["""encoder_no_repeat_ngram_size"""]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[_config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('T')
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    # Undirected weighted graph
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with weights)
        self.connections: dict = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # getting the edges in ascending order of weights
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
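

# Minimal usage sketch (illustrative):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # the weight-10 edge is not part of the MST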
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCamelCase : Any = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'attention_mask']

    def __init__(self, feature_size=80, sampling_rate=16_000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        # TorchAudio's Kaldi fbank expects 16-bit signed integer scale
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True, padding_value: float = 0.0) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_tensors=None, sampling_rate=None, return_attention_mask=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    F' {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs)

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
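

# Minimal usage sketch (illustrative; requires torchaudio at runtime):
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    dummy_audio = (np.random.randn(16_000) * 0.1).astype(np.float32)  # ~1 s at 16 kHz
    feats = extractor(dummy_audio, sampling_rate=16_000, return_tensors="np")
    print(feats["input_features"].shape)  # (1, num_frames, 80)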
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method."""

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=('DownEncoderBlock2D',), up_block_types=('UpDecoderBlock2D',), block_out_channels=(64,), layers_per_block=1, act_fn='silu', latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18_215, norm_type='group'):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = self.encoder(_snake_case )
lowerCAmelCase = self.quant_conv(_snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_snake_case )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ):
"""simple docstring"""
if not force_not_quantize:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(_snake_case )
lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = sample
lowerCAmelCase = self.encode(_snake_case ).latents
lowerCAmelCase = self.decode(_snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
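# --- Illustrative sketch, not part of the original file ---
# Round trip through the VQ autoencoder above: encode to continuous latents,
# then decode (codebook quantization -> post_quant_conv -> decoder). Hedged
# usage, assuming the class corresponds to diffusers' `VQModel` with defaults:
import torch
from diffusers import VQModel

vq = VQModel()                        # defaults mirror the __init__ signature above
x = torch.randn(1, 3, 32, 32)
latents = vq.encode(x).latents        # conv-encoded latents, pre-quantization
recon = vq.decode(latents).sample     # codebook lookup happens inside decode
assert recon.shape == x.shape         # the default single down/up block preserves shape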
| 4 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = 32 , _snake_case=PILImageResampling.BILINEAR , _snake_case = True , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize
lowerCAmelCase = do_rescale
lowerCAmelCase = size_divisor
lowerCAmelCase = resample
super().__init__(**_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = get_image_size(_snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase = height // size_divisor * size_divisor
lowerCAmelCase = width // size_divisor * size_divisor
lowerCAmelCase = resize(_snake_case , (new_h, new_w) , resample=_snake_case , data_format=_snake_case , **_snake_case )
return image
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
return rescale(image=_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case=None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for img in images]
if do_resize:
lowerCAmelCase = [self.resize(_snake_case , size_divisor=_snake_case , resample=_snake_case ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(_snake_case , scale=1 / 2_55 ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
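# --- Illustrative sketch, not part of the original file ---
# The resize step above floors height and width to the nearest multiple of
# `size_divisor` so downstream feature maps divide evenly. Just that arithmetic:
def floor_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert floor_to_multiple(481, 640) == (480, 640)   # 481 -> 480; 640 is already divisible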
| 4 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__UpperCamelCase : Optional[Any] = tuple[int, int]
class a :
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = vertices
lowerCAmelCase = {
(min(_snake_case ), max(_snake_case )): weight for edge, weight in edges.items()
}
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase = weight
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Graph({min(self.vertices )} , {} )
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase = edge
lowerCAmelCase = weight
subgraph.add_edge(_snake_case , _snake_case )
return subgraph
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "p107_network.txt" ):
lowerCAmelCase = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
with open(_UpperCAmelCase ) as f:
lowerCAmelCase = f.read().strip().split('\n' )
lowerCAmelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowerCAmelCase = int(adjacency_matrix[edgea][edgea] )
lowerCAmelCase = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
lowerCAmelCase = graph.prims_algorithm()
lowerCAmelCase = sum(graph.edges.values() )
lowerCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
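# --- Illustrative sketch, not part of the original file ---
# The loop above is Prim's algorithm: grow the tree by the cheapest edge that
# crosses its boundary. A self-contained run on hypothetical 4-vertex data
# (independent of the obfuscated class names above; assumes a connected graph):
def prim_total(vertices: set[int], edges: dict[tuple[int, int], int]) -> int:
    in_tree, total = {min(vertices)}, 0
    while in_tree != vertices:
        weight, (u, v) = min(
            (w, e) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)
        )
        in_tree |= {u, v}
        total += weight
    return total

assert prim_total({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (0, 2): 4, (2, 3): 3}) == 6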
| 4 | 1 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__UpperCamelCase : Optional[int] = '''bert-base-cased'''
__UpperCamelCase : List[Any] = '''fp16'''
__UpperCamelCase : Any = '''bf16'''
__UpperCamelCase : Dict = [FPaa, BFaa]
@require_fsdp
@require_cuda
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_snake_case ):
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = F'{i + 1}'
lowerCAmelCase = strategy
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_snake_case ):
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = prefetch_policy
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_snake_case ):
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = state_dict_type
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = AutoModel.from_pretrained(_snake_case )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCAmelCase = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
lowerCAmelCase = '2000'
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_snake_case )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = 'TRANSFORMER_BASED_WRAP'
lowerCAmelCase = 'T5Layer'
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
with self.assertRaises(_snake_case ) as cm:
fsdp_plugin.set_auto_wrap_policy(_snake_case )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = 'SIZE_BASED_WRAP'
lowerCAmelCase = '0'
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_snake_case )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = mp_dtype
with mockenv_context(**_snake_case ):
lowerCAmelCase = Accelerator()
if mp_dtype == "fp16":
lowerCAmelCase = torch.floataa
elif mp_dtype == "bf16":
lowerCAmelCase = torch.bfloataa
lowerCAmelCase = MixedPrecision(param_dtype=_snake_case , reduce_dtype=_snake_case , buffer_dtype=_snake_case )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _snake_case )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _snake_case ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCAmelCase = self.dist_env.copy()
lowerCAmelCase = str(_snake_case ).lower()
with mockenv_context(**_snake_case ):
lowerCAmelCase = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_snake_case ) )
@require_fsdp
@require_multi_gpu
@slow
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase = 0.82
lowerCAmelCase = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
lowerCAmelCase = {
'multi_gpu_fp16': 32_00,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 20_00,
'fsdp_full_shard_transformer_based_wrap_fp16': 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCAmelCase = 1_60
lowerCAmelCase = 1_60
lowerCAmelCase = inspect.getfile(accelerate.test_utils )
lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.join(self.test_scripts_folder , 'test_performance.py' )
lowerCAmelCase = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
lowerCAmelCase = cmd.copy()
for i, strategy in enumerate(_snake_case ):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
lowerCAmelCase = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(_snake_case ):
lowerCAmelCase = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
if strategy != "FULL_SHARD":
continue
lowerCAmelCase = len(_snake_case )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCAmelCase = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
lowerCAmelCase = cmd_config[:-1]
lowerCAmelCase = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
lowerCAmelCase = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCAmelCase = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(_snake_case ):
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case , env=os.environ.copy() )
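# --- Illustrative sketch, not part of the original file ---
# The pattern every test above follows: mutate a copy of the distributed env,
# enter mockenv_context, and inspect the plugin built from those variables.
# A minimal reproduction (needs torch + accelerate installed; the env var names
# follow accelerate's FSDP constants, exactly as the tests use them):
from transformers.testing_utils import mockenv_context
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

env = {"ACCELERATE_USE_FSDP": "true", "MASTER_ADDR": "localhost", "MASTER_PORT": "10999",
       "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", "FSDP_SHARDING_STRATEGY": "1"}
with mockenv_context(**env):
    plugin = FullyShardedDataParallelPlugin()   # reads the FSDP_* variables at init
    print(plugin.sharding_strategy)             # expected: ShardingStrategy.FULL_SHARD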
| 4 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] )
lowerCAmelCase = np.array(_UpperCAmelCase )
lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = (1, 2, 1)
lowerCAmelCase = (1, 1, 0, 7)
lowerCAmelCase = SARIMAX(
_UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase )
lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' )
lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = regressor.predict(_UpperCAmelCase )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
train_user.sort()
lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 )
lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 )
lowerCAmelCase = qa - qa
lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ):
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase = not_safe + 1
else:
if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__UpperCamelCase : Any = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values)
# split data
__UpperCamelCase : Dict = normalize_df[:, 2].tolist()
__UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist()
__UpperCamelCase : List[str] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist()
__UpperCamelCase : Tuple = x[: len(x) - 1]
__UpperCamelCase : Any = x[len(x) - 1 :]
# for linear regression & sarimax
__UpperCamelCase : str = total_date[: len(total_date) - 1]
__UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1]
__UpperCamelCase : List[Any] = total_match[: len(total_match) - 1]
__UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :]
__UpperCamelCase : str = total_user[len(total_user) - 1 :]
__UpperCamelCase : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
__UpperCamelCase : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    print(f'''Today\'s data is {not_str}safe.''')
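# --- Illustrative sketch, not part of the original file ---
# The voting rule above counts a forecast as "safe" when it does not exceed the
# actual figure and sits within 0.1 of it in absolute value; majority wins.
# Standalone restatement with a toy vote:
def majority_safe(votes: list[float], actual: float, tol: float = 0.1) -> bool:
    safe = not_safe = 0
    for v in votes:
        if v > actual:
            not_safe += 1
        elif abs(abs(v) - abs(actual)) <= tol:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe

assert majority_safe([0.30, 0.34, 0.90], actual=0.35) is True   # two of three are close from below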
| 4 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] )
lowerCAmelCase = np.array(_UpperCAmelCase )
lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = (1, 2, 1)
lowerCAmelCase = (1, 1, 0, 7)
lowerCAmelCase = SARIMAX(
_UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase )
lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' )
lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = regressor.predict(_UpperCAmelCase )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
train_user.sort()
lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 )
lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 )
lowerCAmelCase = qa - qa
lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : float ):
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase = not_safe + 1
else:
if abs(abs(_UpperCAmelCase ) - abs(_UpperCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__UpperCamelCase : Any = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values)
# split data
__UpperCamelCase : Dict = normalize_df[:, 2].tolist()
__UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist()
__UpperCamelCase : List[str] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist()
__UpperCamelCase : Tuple = x[: len(x) - 1]
__UpperCamelCase : Any = x[len(x) - 1 :]
# for linear regression & sarimax
__UpperCamelCase : str = total_date[: len(total_date) - 1]
__UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1]
__UpperCamelCase : List[Any] = total_match[: len(total_match) - 1]
__UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :]
__UpperCamelCase : str = total_user[len(total_user) - 1 :]
__UpperCamelCase : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
__UpperCamelCase : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__UpperCamelCase : List[str] = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print('''Today\'s data is {not_str}safe.''')
| 4 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
        raise ValueError('The specified rows and columns do not match the number of images.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
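# --- Illustrative sketch, not part of the original file ---
# The grid helper above pastes n images row-major onto one canvas. Restated
# standalone and checked with blank PIL tiles (the sizes are hypothetical):
from PIL import Image

def image_grid(imgs: list, rows: int, cols: int) -> "Image.Image":
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

tiles = [Image.new('RGB', (64, 64)) for _ in range(4)]
assert image_grid(tiles, rows=2, cols=2).size == (128, 128)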
| 4 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__UpperCamelCase : str = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__UpperCamelCase : Optional[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Any ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase = json.loads(f.read() )
lowerCAmelCase = collections.OrderedDict()
lowerCAmelCase = collections.OrderedDict()
lowerCAmelCase = collections.OrderedDict()
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(_UpperCAmelCase ):
lowerCAmelCase = b
lowerCAmelCase = idx
for wd in b:
lowerCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ):
"""simple docstring"""
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(_snake_case ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
lowerCAmelCase = do_clean_text
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
lowerCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.raw_vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCAmelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCAmelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(','.join(_snake_case ) + '\n' )
index += 1
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class a ( a__ ):
def __init__( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = vocab # same as swe
lowerCAmelCase = ids_to_tokens # same as bpe
lowerCAmelCase = emoji
lowerCAmelCase = np.max([len(_snake_case ) for w in self.vocab.keys()] )
lowerCAmelCase = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCAmelCase = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCAmelCase = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCAmelCase = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCAmelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCAmelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCAmelCase = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
"""simple docstring"""
return len(self.ids_to_tokens )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.content_repattera.sub('<URL>' , _snake_case )
lowerCAmelCase = self.content_repattera.sub('<EMAIL>' , _snake_case )
lowerCAmelCase = self.content_repattera.sub('<TEL>' , _snake_case )
lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case )
lowerCAmelCase = self.content_repattera.sub('<DATE>' , _snake_case )
lowerCAmelCase = self.content_repattera.sub('<PRICE>' , _snake_case )
lowerCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def UpperCamelCase__ ( self , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = text.replace(' ' , '<SP>' )
lowerCAmelCase = text.replace(' ' , '<SP>' )
lowerCAmelCase = text.replace('\r\n' , '<BR>' )
lowerCAmelCase = text.replace('\n' , '<BR>' )
lowerCAmelCase = text.replace('\r' , '<BR>' )
lowerCAmelCase = text.replace('\t' , '<TAB>' )
lowerCAmelCase = text.replace('—' , 'ー' )
lowerCAmelCase = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase = text.replace(_snake_case , _snake_case )
if clean:
lowerCAmelCase = self.clean_text(_snake_case )
def check_simbol(_snake_case ):
lowerCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
lowerCAmelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(_snake_case ):
lowerCAmelCase = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
lowerCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
lowerCAmelCase = 0
lowerCAmelCase = []
while pos < len(_snake_case ):
lowerCAmelCase = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
lowerCAmelCase = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
lowerCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
lowerCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
result.append(_snake_case )
lowerCAmelCase = e
else:
lowerCAmelCase = pos + 1
lowerCAmelCase = text[pos:end]
if check_simbol(_snake_case ):
result.append('<KIGOU>' )
elif checkuae(_snake_case ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
lowerCAmelCase = end
return result
def UpperCamelCase__ ( self , _snake_case , _snake_case="\n" ):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase = ''.join(_snake_case )
return text
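# --- Illustrative sketch, not part of the original file ---
# Out-of-vocabulary characters above fall back to per-byte tokens ("<|byte%d|>")
# that the detokenizer reassembles with bytearray(...).decode("utf-8"). The
# round trip in isolation:
def to_byte_tokens(text: str) -> list[str]:
    return ['<|byte%d|>' % b for b in text.encode('utf-8')]

def from_byte_tokens(tokens: list[str]) -> str:
    return bytearray(int(t[6:-2]) for t in tokens).decode('utf-8', errors='replace')

assert from_byte_tokens(to_byte_tokens('猫')) == '猫'   # three UTF-8 bytes round-trip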
| 4 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ):
lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCamelCase : Optional[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCamelCase : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ):
try:
lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to the'
                F' first {n_student} layers' )
return list(range(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ):
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(_UpperCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ):
    lowerCAmelCase = 'encoder_layers and decoder_layers cannot both be None; the student would just be an identical copy of the teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval()
else:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}'
lowerCAmelCase = teacher.config.to_diff_dict()
try:
lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_UpperCAmelCase )
# Copy weights
lowerCAmelCase = teacher.config_class(**_UpperCAmelCase )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(_UpperCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
if d_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
try:
if hasattr(
_UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
lowerCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_UpperCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
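# --- Illustrative sketch, not part of the original file ---
# For a 12-layer teacher, the LAYERS_TO_COPY table above spreads the copied
# layers evenly while keeping the first and last. The 12-layer row, restated:
LAYERS_TO_COPY_12 = {1: [0], 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11],
                     6: [0, 2, 4, 7, 9, 11], 12: list(range(12))}

assert LAYERS_TO_COPY_12[3] == [0, 6, 11]   # 3-layer student: first, middle, last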
| 4 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : str = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser(
        description='Preprocess the data once (tokenization + token_to_ids) to avoid re-doing it several times.' )
parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
lowerCAmelCase = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
lowerCAmelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(F'{len(_UpperCAmelCase )} examples to process.' )
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 1_0000
lowerCAmelCase = time.time()
for text in data:
lowerCAmelCase = F'{bos} {text.strip()} {sep}'
lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
rslt.append(_UpperCAmelCase )
iter += 1
if iter % interval == 0:
lowerCAmelCase = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
lowerCAmelCase = time.time()
logger.info('Finished binarization' )
logger.info(F'{len(_UpperCAmelCase )} examples processed.' )
lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle'
lowerCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt]
else:
lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
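# --- Illustrative sketch, not part of the original file ---
# The dtype switch above halves the dump size whenever the vocabulary fits in
# 16 bits. The same decision in isolation (the token ids are bert-base-uncased's
# encoding of "hello world" with special tokens):
import numpy as np

vocab_size = 30522                              # bert-base-uncased
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
token_ids = np.array([101, 7592, 2088, 102], dtype=dtype)
assert token_ids.dtype == np.uint16             # 2 bytes per token instead of 4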
| 4 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
lowerCAmelCase = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(_UpperCAmelCase )
first_sum += 1 / float(_UpperCAmelCase )
index += 1
return 1 / first_sum
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
lowerCAmelCase = 0.00
lowerCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCAmelCase = F'Resistor at index {index} has a negative value!'
raise ValueError(_UpperCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
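# --- Illustrative sketch, not part of the original file ---
# Sanity check of the two formulas above: 4 ohm and 8 ohm resistors give
# 1 / (1/4 + 1/8) = 8/3 ohm in parallel and 12 ohm in series.
def parallel(resistors: list[float]) -> float:
    return 1 / sum(1 / r for r in resistors)

def series(resistors: list[float]) -> float:
    return sum(resistors)

assert abs(parallel([4.0, 8.0]) - 8 / 3) < 1e-9
assert series([4.0, 8.0]) == 12.0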
| 4 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
lowerCAmelCase = args.pruning_method
lowerCAmelCase = args.threshold
lowerCAmelCase = args.model_name_or_path.rstrip('/' )
lowerCAmelCase = args.target_model_path
print(F'Load fine-pruned model from {model_name_or_path}' )
lowerCAmelCase = torch.load(os.path.join(_UpperCAmelCase , 'pytorch_model.bin' ) )
lowerCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
elif "bias" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
else:
if pruning_method == "magnitude":
lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_UpperCAmelCase , threshold=_UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase = TopKBinarizer.apply(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase = ThresholdBinarizer.apply(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase ,lowerCAmelCase = -0.1, 1.1
lowerCAmelCase = torch.sigmoid(_UpperCAmelCase )
lowerCAmelCase = s * (r - l) + l
lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowerCAmelCase = os.path.join(
os.path.dirname(_UpperCAmelCase ) , F'bertarized_{os.path.basename(_UpperCAmelCase )}' )
if not os.path.isdir(_UpperCAmelCase ):
shutil.copytree(_UpperCAmelCase , _UpperCAmelCase )
print(F'\nCreated folder {target_model_path}' )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
        '''For `sigmoied_threshold`, it is the threshold tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
        help='''Folder to save the pruned model to; defaults to a `bertarized_` sibling of the source model''',
)
__UpperCamelCase : List[Any] = parser.parse_args()
main(args)
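# --- Illustrative sketch, not part of the original file ---
# Movement pruning's TopK branch above keeps the weights whose learned scores
# rank in the top `threshold` fraction. A simplified mask construction (the
# real TopKBinarizer also defines a straight-through backward pass):
import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).to(scores.dtype)

scores = torch.tensor([[0.9, 0.1], [0.5, 0.3]])
print(topk_mask(scores, threshold=0.5))   # keeps the two highest-scoring weights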
| 4 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a ( a__ ):
snake_case__ = '''glpn'''
def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = depths
lowerCAmelCase = sr_ratios
lowerCAmelCase = hidden_sizes
lowerCAmelCase = patch_sizes
lowerCAmelCase = strides
lowerCAmelCase = mlp_ratios
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = drop_path_rate
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = max_depth
lowerCAmelCase = head_in_index
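# --- Illustrative sketch, not part of the original file ---
# Instantiating the config above with one override; assuming the class matches
# transformers' GLPNConfig, whose defaults mirror the signature:
from transformers import GLPNConfig

config = GLPNConfig(decoder_hidden_size=32)
print(config.model_type, config.depths)   # "glpn", [2, 2, 2, 2]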
| 4 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , input_dims = 1_28 , targets_length = 2_56 , max_decoder_noise_time = 2_000.0 , d_model = 7_68 , num_layers = 12 , num_heads = 12 , d_kv = 64 , d_ff = 20_48 , dropout_rate = 0.1 , ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        # pairwise product of the two 0/1 masks -> (batch, query_len, key_len), plus a head axis
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward( self , hidden_states ):
        # gated GELU: elementwise product of a GELU branch and a linear branch
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward( self , hidden_states ):
        # T5-style RMSNorm: scale only, no shift; variance accumulated in float32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input ):
        # tanh approximation of GELU (as used in Google BERT / OpenAI GPT)
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward( self , x , conditioning_emb ):
        # FiLM: predict a (scale, shift) pair from the conditioning embedding and apply it
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
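# Smoke-test sketch (hedged; all sizes are illustrative, not from the original file):
#     decoder = TaFilmDecoder(input_dims=8 , targets_length=4 , d_model=16 , num_layers=1 , num_heads=2 , d_kv=8 , d_ff=32 )
#     tokens = torch.rand(2 , 4 , 8 )
#     enc , enc_mask = torch.rand(2 , 3 , 16 ) , torch.ones(2 , 3 )
#     spec = decoder(encodings_and_masks=[(enc, enc_mask)] , decoder_input_tokens=tokens , decoder_noise_time=torch.rand(2 ) )
#     assert spec.shape == (2, 4, 8)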
| 4 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
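    # Note on the fixture above: LayoutLM expects `bbox` as (x0, y0, x1, y1) corner
    # coordinates in [0, range_bbox), and the swap loop guarantees x0 <= x1 and y0 <= y1.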
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMModel(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForMaskedLM(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFLayoutLMForQuestionAnswering(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp( self ):
        self.model_tester = TFLayoutLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
    def test_onnx_compliancy( self ):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest( unittest.TestCase ):
@slow
    def test_forward_pass_no_head( self ):
        model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-3 ) )
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , expected_slice , atol=1E-3 ) )
@slow
    def test_forward_pass_sequence_classification( self ):
        model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape , expected_shape )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape , expected_shape )
@slow
    def test_forward_pass_token_classification( self ):
        model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=labels )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , expected_shape )
@slow
    def test_forward_pass_question_answering( self ):
        model = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , expected_shape )
        self.assertEqual(outputs.end_logits.shape , expected_shape )
| 4 | 1 |
"""simple docstring"""
import math
def jump_search(arr : list , x : int ) -> int:
    """Return the index of `x` in sorted `arr` (or -1) by jumping in sqrt(n)-sized blocks."""
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
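# Worked example: on arr = [0, 1, 2, 4, 8, 16] and x = 8, the block size is
# int(sqrt(6)) = 2; the first loop overshoots at arr[5] = 16, and the linear scan
# from the previous block returns index 4.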
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 4 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname , version , pattern ):
    """Update the version in one file using a pattern from REPLACE_PATTERNS."""
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples(version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update(version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """Replace links to the `main` doc with links to the stable doc in the README model list."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(F'Updating version to {version}.' )
    global_version_update(version , patch=patch )
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(version )
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
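# Worked example (hedged sketch) of one pattern above:
#     pat , repl = REPLACE_PATTERNS['init']
#     pat.sub(repl.replace('VERSION' , '4.99.0' ) , '__version__ = "4.98.0.dev0"' )
#     # -> '__version__ = "4.99.0"\n'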
| 4 | 1 |
"""simple docstring"""
import operator as op
def solve(post_fix ):
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
    print('-' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' , )
    return int(stack[0] )
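# Example (runs against `solve` above): "5 6 9 * +" evaluates to 5 + (6 * 9) = 59.
#     assert solve('5 6 9 * +'.split(' ' ) ) == 59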
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('''\n\tResult = ''', solve(Postfix))
| 4 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class a ( a__ ):
    def _create_dummy_dataset( self ):
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
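# Minimal standalone FAISS sketch (hedged; not from the test file above) of the
# inner-product index pattern these tests exercise, without the `datasets` wrapper:
#     import faiss, numpy as np
#     index = faiss.IndexFlatIP(5 )
#     index.add(np.eye(5 , dtype=np.float32 ) )
#     scores , ids = index.search(np.ones((1, 5) , dtype=np.float32 ) , 1 )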
| 4 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base( self ):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        input_text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(input_text )] )
        expected_output_shape = (1, 12, 7_68)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 4 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = IFInpaintingPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
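# Note: these fast tests run the pipeline end-to-end on 32x32 random tensors with only 2
# inference steps; the tolerances above (1E-1 for the half-precision save/load round-trip
# down to 1E-3 for the xformers path) scale with how lossy each code path is expected to be.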
| 4 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig( PretrainedConfig ):
    model_type = '''glpn'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 4 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = self.vocab_size - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTDoubleHeadsModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = OpenAIGPTForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = inputs_dict['labels']
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_snake_case , )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = OpenAIGPTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
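# The integration test above checks greedy decoding: with do_sample=False, generate() must
# reproduce the fixed 20-token sequence (3-token prompt + 17 generated tokens) exactly.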
| 4 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
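# Loading sketch (hedged): once uploaded, the tiny checkpoint is consumed exactly like a
# full-size one, e.g.
#     tokenizer = FSMTTokenizer.from_pretrained('stas/tiny-wmt19-en-ru')
#     model = FSMTForConditionalGeneration.from_pretrained('stas/tiny-wmt19-en-ru')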
| 4 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(F'Loading text from {args.file_path}' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(F'{len(data )} examples to process.' )
    rslt = []
    iter = 0
    interval = 1_0000
    start = time.time()
    for text in data:
        text = F'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(F'{len(data )} examples processed.' )
    dp_file = F'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'Dump to {dp_file}' )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
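# Illustrative invocation (a sketch; the script filename and paths are hypothetical,
# the flags come from the argparse definition above):
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text
#
# The resulting pickle holds one array of token ids per input line, stored with a
# 16-bit dtype when the vocab fits below 1 << 16 and a wider dtype otherwise.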
| 4 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCamelCase : Tuple = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any=8 ):
lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
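# e.g. with the default scale_factor=8, height = width = 768 maps to 96 x 96:
# 768 // 8**2 = 12 with no remainder, then 12 * 8 = 96 on each side.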
class a ( a__ ):
def __init__( self , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if latents is None:
lowerCAmelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCAmelCase = latents.to(_snake_case )
lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_snake_case , _snake_case )
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase ,lowerCAmelCase = cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case )
# We'll offload the last model manually.
lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = 5_12 , _snake_case = 5_12 , _snake_case = 1_00 , _snake_case = 4.0 , _snake_case = 1 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , ):
"""simple docstring"""
lowerCAmelCase = self._execution_device
lowerCAmelCase = guidance_scale > 1.0
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCAmelCase = image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = hint.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
lowerCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
lowerCAmelCase = self.scheduler.timesteps
lowerCAmelCase = self.movq.config.latent_channels
lowerCAmelCase ,lowerCAmelCase = downscale_height_and_width(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
lowerCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase = {'image_embeds': image_embeds, 'hint': hint}
lowerCAmelCase = self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase ,lowerCAmelCase = noise_pred.chunk(2 )
lowerCAmelCase ,lowerCAmelCase = variance_pred.chunk(2 )
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , )[0]
# post-processing
lowerCAmelCase = self.movq.decode(_snake_case , force_not_quantize=_snake_case )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase = image * 0.5 + 0.5
lowerCAmelCase = image.clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 4 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''bert'''
def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class a ( a__ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 4 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns does not match the number of images.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
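# e.g. 4 images with rows=2, cols=2 are pasted left-to-right, top-to-bottom into one
# contact sheet of size (2*w, 2*h), where w and h are taken from the first image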
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
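# Illustrative invocation (a sketch; the script filename and the model path are
# hypothetical, the flags come from the argparse definition above):
#   python text2images.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42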
| 4 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( a__ , unittest.TestCase ):
snake_case__ = DanceDiffusionPipeline
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase = IPNDMScheduler()
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = DanceDiffusionPipeline(**_snake_case )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe(**_snake_case )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
"""simple docstring"""
__UpperCamelCase : str = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float ):
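"""
Convert a base-10 integer value to its base-16 (hexadecimal) string form.

Sketch of the intended behavior (added for illustration; the sign is handled
separately and digits come from the `values` table above):
    5    -> '0x5'
    255  -> '0xff'
    -256 -> '-0x100'
"""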
assert type(_UpperCAmelCase ) in (int, float) and decimal == int(_UpperCAmelCase )
lowerCAmelCase = int(_UpperCAmelCase )
lowerCAmelCase = ''
lowerCAmelCase = False
if decimal < 0:
lowerCAmelCase = True
decimal *= -1
while decimal > 0:
lowerCAmelCase ,lowerCAmelCase = divmod(_UpperCAmelCase , 16 )
lowerCAmelCase = values[remainder] + hexadecimal
lowerCAmelCase = '0x' + hexadecimal
if negative:
lowerCAmelCase = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
| 4 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase = get_resize_output_image_size(_snake_case , size=size['shortest_edge'] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(_snake_case )
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(_snake_case )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
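# Usage sketch (hedged; the class is registered under the mangled name `a` in this file):
# calling the processor on a list of PIL images with return_tensors="pt" is intended to
# yield a BatchFeature whose "pixel_values" entry is a (batch, channels, height, width) tensor.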
| 4 |
"""simple docstring"""
from typing import Any
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = data
lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return F'Node({self.data})'
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase = self.head
while node:
yield node.data
lowerCAmelCase = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
def __getitem__( self , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase = self.head
for _ in range(_snake_case ):
lowerCAmelCase = current.next
lowerCAmelCase = data
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(len(self ) , _snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(0 , _snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase = Node(_snake_case )
if self.head is None:
lowerCAmelCase = new_node
elif index == 0:
lowerCAmelCase = self.head # link new_node to head
lowerCAmelCase = new_node
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = new_node
def UpperCamelCase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase__ ( self , _snake_case = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase = self.head # default first node
if index == 0:
lowerCAmelCase = self.head.next
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next.next
return delete_node.data
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.head is None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = None
lowerCAmelCase = self.head
while current:
# Store the current node's next node.
lowerCAmelCase = current.next
# Make the current node's next point backwards
lowerCAmelCase = prev
# Make the previous node be the current node
lowerCAmelCase = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase = next_node
# Return prev in order to put the head at the end
lowerCAmelCase = prev
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_UpperCAmelCase ) == i
linked_list.insert_nth(_UpperCAmelCase , i + 1 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_UpperCAmelCase ) == 9
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase = linked_list.delete_head()
assert result == -9
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_UpperCAmelCase )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _SCREAMING_SNAKE_CASE ():
from doctest import testmod
testmod()
lowerCAmelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_UpperCAmelCase )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
lowerCAmelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_UpperCAmelCase )
print(F'length of linked_list is : {len(_UpperCAmelCase )}' )
if __name__ == "__main__":
main()
| 4 | 1 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
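"""
Check whether the given side lengths can form a polygon in the Euclidean plane:
the longest side must be strictly shorter than the sum of all the other sides.

Sketch of the intended behavior (added for illustration):
    [6, 10, 5]    -> True   (10 < 6 + 5)
    [3, 7, 13, 2] -> False  (13 >= 3 + 7 + 2)
"""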
if len(_UpperCAmelCase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
lowerCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
"""simple docstring"""
from __future__ import annotations
import requests
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(_UpperCAmelCase ).json()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase )
return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
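# Example of the markdown this produces, one line per story (titles and urls are
# whatever the Hacker News API returns):
# * [Some story title](https://example.com/story)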
| 4 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _SCREAMING_SNAKE_CASE ():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowerCAmelCase = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _SCREAMING_SNAKE_CASE ():
assert _test_patching.open is open
lowerCAmelCase = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , _UpperCAmelCase ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _SCREAMING_SNAKE_CASE ():
# pandas.read_csv is not present in _test_patching
lowerCAmelCase = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , _UpperCAmelCase ):
pass
def _SCREAMING_SNAKE_CASE ():
# builtins should always be mocked even if they're not in the globals,
# in case they're loaded at some point
lowerCAmelCase = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , _UpperCAmelCase ) is None
with patch_submodule(_test_patching , 'len' , _UpperCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = '__test_patch_submodule_start_and_stop_mock__'
lowerCAmelCase = patch_submodule(_test_patching , 'open' , _UpperCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _SCREAMING_SNAKE_CASE ():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowerCAmelCase = '__test_patch_submodule_successive_join__'
lowerCAmelCase = '__test_patch_submodule_successive_dirname__'
lowerCAmelCase = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ):
with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ):
with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _UpperCAmelCase ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _UpperCAmelCase ):
pass
| 4 |
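The tests above exercise `patch_submodule` from `datasets.utils.patching`, which swaps out an attribute reachable through a dotted path and restores it afterwards. As a rough sketch of just the swap-and-restore mechanics (the real helper also wraps intermediate modules in `_PatchedModuleObj`, which this deliberately omits; `simple_patch_submodule` is a hypothetical name):

import contextlib


@contextlib.contextmanager
def simple_patch_submodule(module, target, new_value):
    """Minimal sketch: replace `module.<target>` (a dotted path) with
    `new_value` for the duration of the block, then restore it."""
    *parents, attr = target.split(".")
    obj = module
    for name in parents:  # walk down, e.g. "os.path" -> os.path
        obj = getattr(obj, name)
    original = getattr(obj, attr)
    setattr(obj, attr, new_value)
    try:
        yield
    finally:
        setattr(obj, attr, original)  # restore on exit


if __name__ == "__main__":
    import types

    mod = types.ModuleType("demo")
    mod.value = 1
    with simple_patch_submodule(mod, "value", 42):
        assert mod.value == 42
    assert mod.value == 1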
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ):
lowerCAmelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 48
lowerCAmelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 60
lowerCAmelCase = [6, 6, 6, 6]
lowerCAmelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = 4
lowerCAmelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = 126
lowerCAmelCase = 7
lowerCAmelCase = 255.0
lowerCAmelCase = ''
return config
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
lowerCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
lowerCAmelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
lowerCAmelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
lowerCAmelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
lowerCAmelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
lowerCAmelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase = 'swin2sr.' + name
return name
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase = key.split('.' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[4] )
lowerCAmelCase = config.embed_dim
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[dim : dim * 2, :]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[:dim]
lowerCAmelCase = val[dim : dim * 2]
lowerCAmelCase = val[-dim:]
else:
lowerCAmelCase = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
lowerCAmelCase = get_config(_UpperCAmelCase )
lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCAmelCase )
model.eval()
lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )
lowerCAmelCase = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_UpperCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowerCAmelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
lowerCAmelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase = transforms(_UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase = model(_UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 512, 512] )
lowerCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-3 )
print('Looks ok!' )
lowerCAmelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
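The `qkv` branch in `convert_state_dict` above splits a fused attention projection into separate query/key/value tensors by slicing rows. A standalone check of that slicing on a toy tensor (the `split_qkv` helper and the dimensions are made up for illustration):

import torch


def split_qkv(weight: torch.Tensor, dim: int):
    # A fused qkv weight has shape (3 * dim, dim): rows 0..dim are the query
    # projection, dim..2*dim the key, and the last dim rows the value.
    q = weight[:dim, :]
    k = weight[dim : dim * 2, :]
    v = weight[-dim:, :]
    return q, k, v


dim = 4  # illustrative embed_dim
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = split_qkv(fused, dim)
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), fused)  # slices tile the fused tensor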
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class a ( a__ ):
snake_case__ = 42
snake_case__ = 42
snake_case__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 4 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''megatron-bert'''
def __init__( self , _snake_case=2_90_56 , _snake_case=10_24 , _snake_case=24 , _snake_case=16 , _snake_case=40_96 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
| 4 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
lowerCAmelCase = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
lowerCAmelCase = model(_snake_case )['last_hidden_state']
lowerCAmelCase = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 4 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build the BST.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 4 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a ( a__ ):
snake_case__ = 42
class a ( a__ , a__ ):
@register_to_config
def __init__( self , _snake_case = 3 , _snake_case = 3 , _snake_case = ("DownEncoderBlock2D",) , _snake_case = ("UpDecoderBlock2D",) , _snake_case = (64,) , _snake_case = 1 , _snake_case = "silu" , _snake_case = 3 , _snake_case = 32 , _snake_case = 2_56 , _snake_case = 32 , _snake_case = None , _snake_case = 0.18_215 , _snake_case = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=_snake_case , out_channels=_snake_case , down_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , double_z=_snake_case , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )
lowerCAmelCase = VectorQuantizer(_snake_case , _snake_case , beta=0.25 , remap=_snake_case , sane_index_shape=_snake_case )
lowerCAmelCase = nn.Convad(_snake_case , _snake_case , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=_snake_case , out_channels=_snake_case , up_block_types=_snake_case , block_out_channels=_snake_case , layers_per_block=_snake_case , act_fn=_snake_case , norm_num_groups=_snake_case , norm_type=_snake_case , )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = self.encoder(_snake_case )
lowerCAmelCase = self.quant_conv(_snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_snake_case )
@apply_forward_hook
def UpperCamelCase__ ( self , _snake_case , _snake_case = False , _snake_case = True ):
"""simple docstring"""
if not force_not_quantize:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.quantize(_snake_case )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(_snake_case )
lowerCAmelCase = self.decoder(_snake_case , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = True ):
"""simple docstring"""
lowerCAmelCase = sample
lowerCAmelCase = self.encode(_snake_case ).latents
lowerCAmelCase = self.decode(_snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_snake_case )
| 4 | 1 |
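The `VQModel` above leans on a `VectorQuantizer` whose core operation is a nearest-neighbour lookup into a learned codebook. A minimal numpy sketch of that lookup, with a made-up codebook (an illustration of the idea, not the diffusers implementation):

import numpy as np


def nearest_code(latents: np.ndarray, codebook: np.ndarray) -> np.ndarray:
    # latents: (n, d) continuous vectors; codebook: (k, d) learned codes.
    # Squared L2 distance between every latent and every code, then argmin.
    d2 = ((latents[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)
    return d2.argmin(axis=1)  # index of the closest code per latent


rng = np.random.default_rng(0)
codebook = rng.normal(size=(8, 4))  # k=8 codes of dimension 4
latents = codebook[[2, 5]] + 0.01   # points sitting near codes 2 and 5
assert nearest_code(latents, codebook).tolist() == [2, 5]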
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : List[str] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
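Init files like the one above follow the lazy-import convention: declare `_import_structure` up front, import for real only under `TYPE_CHECKING`, and otherwise install a lazy module into `sys.modules`. A stripped-down sketch of the attribute-on-demand idea (`TinyLazyModule` is hypothetical and far simpler than the real `_LazyModule`):

import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> the submodule it lives in
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(attr)
        module = importlib.import_module(submodule)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups are direct
        return value


# Usage sketch: nothing under "json" is imported until .JSONDecoder is touched.
lazy = TinyLazyModule("demo", {"json": ["JSONDecoder"]})
decoder = lazy.JSONDecoder()
assert decoder.decode("[1, 2]") == [1, 2]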
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: dict[EdgeT, int]) -> None:
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        # Grow a spanning tree from the smallest vertex, always adding the
        # cheapest edge with exactly one endpoint inside the tree.
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(file_path) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    # Entry [i][j] holds the weight of edge (j, i); "-" marks no edge.
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 | 1 |
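The `prims_algorithm` method above rescans every edge in each round, which is O(V·E); the usual speed-up keeps candidate crossing edges in a heap. A self-contained sketch of that variant on a made-up three-vertex graph:

import heapq


def prim_mst_weight(adj: dict) -> int:
    """Total weight of a minimum spanning tree of a connected graph.
    adj maps vertex -> list of (weight, neighbour) pairs."""
    start = next(iter(adj))
    visited = {start}
    heap = list(adj[start])
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(adj):
        weight, vertex = heapq.heappop(heap)
        if vertex in visited:
            continue  # edge leads back into the tree; skip it
        visited.add(vertex)
        total += weight
        for edge in adj[vertex]:
            heapq.heappush(heap, edge)
    return total


demo = {
    "a": [(1, "b"), (4, "c")],
    "b": [(1, "a"), (2, "c")],
    "c": [(4, "a"), (2, "b")],
}
assert prim_mst_weight(demo) == 3  # edges a-b (1) and b-c (2)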
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a ( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=4 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class a ( a__ , unittest.TestCase ):
snake_case__ = True
snake_case__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxBertModelTester(self )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = FlaxBertModel.from_pretrained('bert-base-cased' )
lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_snake_case )
| 4 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCAmelCase )] )
lowerCAmelCase = np.array(_UpperCAmelCase )
lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCAmelCase ) ) , x.transpose() ) , _UpperCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = (1, 2, 1)
lowerCAmelCase = (1, 1, 0, 7)
lowerCAmelCase = SARIMAX(
_UpperCAmelCase , exog=_UpperCAmelCase , order=_UpperCAmelCase , seasonal_order=_UpperCAmelCase )
lowerCAmelCase = model.fit(disp=_UpperCAmelCase , maxiter=600 , method='nm' )
lowerCAmelCase = model_fit.predict(1 , len(_UpperCAmelCase ) , exog=[test_match] )
return result[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list ):
lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = regressor.predict(_UpperCAmelCase )
return y_pred[0]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ):
train_user.sort()
lowerCAmelCase = np.percentile(_UpperCAmelCase , 25 )
lowerCAmelCase = np.percentile(_UpperCAmelCase , 75 )
lowerCAmelCase = qa - qa
lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__UpperCamelCase : Optional[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__UpperCamelCase : Any = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__UpperCamelCase : Dict = Normalizer().fit_transform(data_input_df.values)
# split data
__UpperCamelCase : Dict = normalize_df[:, 2].tolist()
__UpperCamelCase : Union[str, Any] = normalize_df[:, 0].tolist()
__UpperCamelCase : List[str] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__UpperCamelCase : Optional[int] = normalize_df[:, [1, 2]].tolist()
__UpperCamelCase : Tuple = x[: len(x) - 1]
__UpperCamelCase : Any = x[len(x) - 1 :]
# for linear regression & sarimax
__UpperCamelCase : str = total_date[: len(total_date) - 1]
__UpperCamelCase : Union[str, Any] = total_user[: len(total_user) - 1]
__UpperCamelCase : List[Any] = total_match[: len(total_match) - 1]
__UpperCamelCase : Optional[Any] = total_date[len(total_date) - 1 :]
__UpperCamelCase : str = total_user[len(total_user) - 1 :]
__UpperCamelCase : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
__UpperCamelCase : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
not_str = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(f'''Today\'s data is {not_str}safe.''')
| 4 | 1 |
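`linear_regression_prediction` above solves ordinary least squares through the normal equations, beta = (XᵀX)⁻¹ Xᵀ y. A quick self-contained check of that formula on synthetic, noise-free data (in practice `np.linalg.lstsq` or `pinv` is numerically safer than an explicit inverse):

import numpy as np


def ols_beta(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # Normal equations: beta = (X^T X)^{-1} X^T y
    return np.linalg.inv(x.T @ x) @ x.T @ y


rng = np.random.default_rng(0)
x = np.column_stack([np.ones(50), rng.normal(size=50)])  # intercept + 1 feature
true_beta = np.array([2.0, -3.0])
y = x @ true_beta  # noise-free, so recovery is exact up to float error
assert np.allclose(ols_beta(x, y), true_beta)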
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__UpperCamelCase : Union[str, Any] = generate_large_matrix()
__UpperCamelCase : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
assert all(row == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for row in grid )
assert all(list(_UpperCAmelCase ) == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for col in zip(*_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] ):
lowerCAmelCase = 0
lowerCAmelCase = len(_UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCAmelCase = (left + right) // 2
lowerCAmelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCAmelCase = mid + 1
else:
lowerCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(grid[0] )
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(_UpperCAmelCase ) * len(grid[0] )) - total
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
for row in grid:
for i, number in enumerate(_UpperCAmelCase ):
if number < 0:
total += len(_UpperCAmelCase ) - i
break
return total
def _SCREAMING_SNAKE_CASE ():
from timeit import timeit
print('Running benchmarks' )
lowerCAmelCase = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCAmelCase = timeit(F'{func}(grid=grid)' , setup=_UpperCAmelCase , number=500 )
print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 4 |
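`find_negative_index` above binary-searches a descending row for the first negative entry; the grid counter then reuses that index as a shrinking bound because the columns are sorted too. The row-level search can be cross-checked against the standard library's `bisect` on the negated row (a quick sanity sketch):

import bisect


def first_negative_index(row):
    # row is sorted in descending order; on the negated (ascending) row,
    # the first strictly positive entry marks the first negative original.
    return bisect.bisect_right([-x for x in row], 0)


assert first_negative_index([4, 3, 2, -1]) == 3
assert first_negative_index([-1, -2]) == 0
assert first_negative_index([3, 2, 1]) == 3  # no negatives at all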
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : nn.ModuleList , _UpperCAmelCase : List[int] ):
lowerCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'{len(_UpperCAmelCase )} != {len(_UpperCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCamelCase : Optional[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCamelCase : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ):
try:
lowerCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ):
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(_UpperCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, PreTrainedModel] , _UpperCAmelCase : Union[str, Path] = "student" , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : Union[int, None] = None , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : str , ):
_msg = 'encoder_layers and decoder_layers cannot be both None -- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
AutoTokenizer.from_pretrained(_UpperCAmelCase ).save_pretrained(_UpperCAmelCase ) # purely for convenience
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ).eval()
else:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), F'teacher must be a model or string got type {type(_UpperCAmelCase )}'
lowerCAmelCase = teacher.config.to_diff_dict()
try:
lowerCAmelCase ,lowerCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCAmelCase ,lowerCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCAmelCase = teacher_e
if d is None:
lowerCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_UpperCAmelCase )
# Copy weights
lowerCAmelCase = teacher.config_class(**_UpperCAmelCase )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(_UpperCAmelCase )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
lowerCAmelCase = student.load_state_dict(teacher.state_dict() , strict=_UpperCAmelCase )
assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCAmelCase ,lowerCAmelCase = list(range(_UpperCAmelCase ) ), list(range(_UpperCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(_UpperCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
if d_layers_to_copy is None:
lowerCAmelCase = pick_layers_to_copy(_UpperCAmelCase , _UpperCAmelCase )
try:
if hasattr(
_UpperCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _UpperCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _UpperCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _UpperCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _UpperCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , _UpperCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
lowerCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_UpperCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 4 | 1 |
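`copy_layers` above moves selected teacher layers into a smaller student purely through `state_dict`. A toy sketch of the same trick on three `nn.Linear` layers picked from a six-layer "teacher" (the layer choice [0, 2, 5] is arbitrary here):

import torch
from torch import nn

# Six-layer teacher, three-layer student, both plain Linear stacks.
teacher = nn.ModuleList(nn.Linear(4, 4) for _ in range(6))
student = nn.ModuleList(nn.Linear(4, 4) for _ in range(3))
layers_to_copy = [0, 2, 5]

# Collect the chosen teacher layers, then load their state dict into the
# student; keys line up because both lists are the same length.
picked = nn.ModuleList(teacher[i] for i in layers_to_copy)
student.load_state_dict(picked.state_dict())

for s, t in zip(student, (teacher[i] for i in layers_to_copy)):
    assert torch.equal(s.weight, t.weight)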
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class a :
snake_case__ = 42
snake_case__ = 42
snake_case__ = None
snake_case__ = None
snake_case__ = None
@dataclass(frozen=a__ )
class a :
snake_case__ = 42
snake_case__ = None
snake_case__ = None
snake_case__ = None
snake_case__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class a ( a__ ):
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case=False , _snake_case = False , ):
"""simple docstring"""
lowerCAmelCase = hans_processors[task]()
lowerCAmelCase = os.path.join(
_snake_case , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_snake_case ) , _snake_case , ) , )
lowerCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
lowerCAmelCase = torch.load(_snake_case )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
lowerCAmelCase = (
processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
)
logger.info('Training examples: %s' , len(_snake_case ) )
lowerCAmelCase = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
logger.info('Saving features into cached file %s' , _snake_case )
torch.save(self.features , _snake_case )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class a :
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1_28 , _snake_case=False , _snake_case = False , ):
"""simple docstring"""
lowerCAmelCase = hans_processors[task]()
lowerCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
lowerCAmelCase = processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
lowerCAmelCase = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_snake_case )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCAmelCase = tf.data.Dataset.from_generator(
_snake_case , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
class a ( a__ ):
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_snake_case , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_snake_case , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for i, line in enumerate(_snake_case ):
if i == 0:
continue
lowerCAmelCase = '%s-%s' % (set_type, line[0])
lowerCAmelCase = line[5]
lowerCAmelCase = line[6]
lowerCAmelCase = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCAmelCase = line[0]
examples.append(InputExample(guid=_snake_case , text_a=_snake_case , text_b=_snake_case , label=_snake_case , pairID=_snake_case ) )
return examples
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[InputExample] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : PreTrainedTokenizer , ):
lowerCAmelCase = {label: i for i, label in enumerate(_UpperCAmelCase )}
lowerCAmelCase = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ) , desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d' % (ex_index) )
lowerCAmelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , )
lowerCAmelCase = label_map[example.label] if example.label in label_map else 0
lowerCAmelCase = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
__UpperCamelCase : Optional[Any] = {
'''hans''': 3,
}
__UpperCamelCase : int = {
'''hans''': HansProcessor,
}
| 4 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : List[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([2.0, 4.0, 4.0])
    1.0
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([2.0, 4.0, 4.0])
    10.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
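A quick check of the two helpers, using the names restored above: a 2 Ω resistor in parallel with two 4 Ω resistors gives 1/(1/2 + 1/4 + 1/4) = 1 Ω, while the same three in series simply add up to 10 Ω.

assert resistor_parallel([2.0, 4.0, 4.0]) == 1.0
assert resistor_series([2.0, 4.0, 4.0]) == 10.0
try:
    resistor_parallel([2.0, 0.0])
except ValueError as err:
    assert "index 1" in str(err)  # a zero resistor is rejected with its index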
"""simple docstring"""
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        # A cell can be visited if it is inside the grid, unvisited, and land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Depth-first search over the 8 neighbours of cell (i, j).
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Mark this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 4 |
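A usage sketch for the class above (method names per the fixes applied here): two clusters of land cells, connected 8-directionally, should count as two islands.

grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
g = Graph(3, 4, grid)
assert g.count_islands() == 2  # {(0,0), (0,1), (1,1)} and {(2,3)}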
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a ( a__ ):
snake_case__ = '''glpn'''
def __init__( self , _snake_case=3 , _snake_case=4 , _snake_case=[2, 2, 2, 2] , _snake_case=[8, 4, 2, 1] , _snake_case=[32, 64, 1_60, 2_56] , _snake_case=[7, 3, 3, 3] , _snake_case=[4, 2, 2, 2] , _snake_case=[1, 2, 5, 8] , _snake_case=[4, 4, 4, 4] , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.1 , _snake_case=1E-6 , _snake_case=64 , _snake_case=10 , _snake_case=-1 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = depths
lowerCAmelCase = sr_ratios
lowerCAmelCase = hidden_sizes
lowerCAmelCase = patch_sizes
lowerCAmelCase = strides
lowerCAmelCase = mlp_ratios
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = drop_path_rate
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = max_depth
lowerCAmelCase = head_in_index
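# Minimal usage sketch (assumes this module sits in its usual `transformers`
# package layout; the directory name is illustrative):
# config = GLPNConfig(decoder_hidden_size=64)
# config.save_pretrained("glpn-config-dir")  # writes config.json for later from_pretrained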
| 4 | 1 |
"""simple docstring"""
import numpy as np
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float = 1e-12 , _UpperCAmelCase : int = 100 , ):
assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_UpperCAmelCase ) == np.iscomplexobj(_UpperCAmelCase )
lowerCAmelCase = np.iscomplexobj(_UpperCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_UpperCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
lowerCAmelCase = False
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 1e12
while not convergence:
# Multiple matrix by the vector.
lowerCAmelCase = np.dot(_UpperCAmelCase , _UpperCAmelCase )
# Normalize the resulting output vector.
lowerCAmelCase = w / np.linalg.norm(_UpperCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCAmelCase = vector.conj().T if is_complex else vector.T
lowerCAmelCase = np.dot(_UpperCAmelCase , np.dot(_UpperCAmelCase , _UpperCAmelCase ) )
# Check convergence.
lowerCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCAmelCase = True
lowerCAmelCase = lambda_
if is_complex:
lowerCAmelCase = np.real(lambda_ )
return lambda_, vector
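# Quick illustrative call (values assumed for demonstration): for the diagonal
# matrix np.diag([2.0, 1.0]) and start vector np.array([1.0, 1.0]), power
# iteration converges to eigenvalue 2 with eigenvector [1, 0] up to sign.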
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 4 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
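# Note on the batch above: LayoutLM bounding boxes are (x0, y0, x1, y1) in a
# 0-1000 normalized page coordinate system, and label value -100 marks positions
# that the cross-entropy loss should ignore (its default ignore_index).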
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 4 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float , _UpperCAmelCase : float ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_UpperCAmelCase ) * abs(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
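    # Illustrative check (values assumed for demonstration): a 2 kg mass moving
    # at 3 m/s carries 0.5 * 2 * 3**2 = 9 joules of kinetic energy.
    assert kinetic_energy(2.0, 3.0) == 9.0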
| 4 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCamelCase : Union[str, Any] = '''examples/'''
__UpperCamelCase : str = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__UpperCamelCase : List[str] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__UpperCamelCase : Optional[int] = '''README.md'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase )
lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
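# Illustrative behaviour (file contents assumed for the example): with
# pattern="init" and version="4.30.0", a line
#     __version__ = "4.30.0.dev0"
# in the target file is rewritten in place to
#     __version__ = "4.30.0"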
def update_version_in_examples(version):
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs by links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 4 | 1 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 4 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
| 4 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
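# Note on the pattern above: `_LazyModule` replaces this module in `sys.modules`,
# so a name such as `TFEncoderDecoderModel` is only imported (and its heavy
# backend only loaded) the first time the attribute is actually accessed.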
| 4 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 4 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class a ( a__ ):
snake_case__ = '''luke'''
def __init__( self , _snake_case=5_02_67 , _snake_case=50_00_00 , _snake_case=7_68 , _snake_case=2_56 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=True , _snake_case=None , _snake_case=1 , _snake_case=0 , _snake_case=2 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = entity_vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = entity_emb_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = use_entity_aware_attention
lowerCAmelCase = classifier_dropout
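# Minimal usage sketch (assumes the usual `transformers` package layout; the
# directory name is illustrative):
# config = LukeConfig(entity_vocab_size=500000)
# config.save_pretrained("luke-config-dir")  # writes config.json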
| 4 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 4 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Sequence[float] , _UpperCAmelCase : float ):
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Sequence[float] , _UpperCAmelCase : float ):
lowerCAmelCase = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase = result * x + coeff
return result
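# Worked example (values match the __main__ block below): for poly = (0.0, 0.0,
# 5.0, 9.3, 7.0) and x = 10.0, both evaluators compute
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800 (up to float rounding).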
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 4 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : str = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
lowerCAmelCase = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowerCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowerCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
lowerCAmelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(F'{len(_UpperCAmelCase )} examples to process.' )
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 1_0000
lowerCAmelCase = time.time()
for text in data:
lowerCAmelCase = F'{bos} {text.strip()} {sep}'
lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
rslt.append(_UpperCAmelCase )
iter += 1
if iter % interval == 0:
lowerCAmelCase = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
lowerCAmelCase = time.time()
logger.info('Finished binarization' )
logger.info(F'{len(_UpperCAmelCase )} examples processed.' )
lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle'
lowerCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt]
else:
lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
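# Example invocation (the script file name and paths are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text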
if __name__ == "__main__":
main()
| 4 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a ( a__ ):
snake_case__ = '''vit_mae'''
def __init__( self , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=2_24 , _snake_case=16 , _snake_case=3 , _snake_case=True , _snake_case=16 , _snake_case=5_12 , _snake_case=8 , _snake_case=20_48 , _snake_case=0.75 , _snake_case=False , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = decoder_num_attention_heads
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = decoder_num_hidden_layers
lowerCAmelCase = decoder_intermediate_size
lowerCAmelCase = mask_ratio
lowerCAmelCase = norm_pix_loss
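# Quick arithmetic note on the defaults above: image_size=224 with patch_size=16
# gives (224 / 16) ** 2 = 196 patches per image, and mask_ratio=0.75 means about
# 147 of them are masked during MAE pre-training.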
| 4 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''bert'''
def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class a ( a__ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 4 | 1 |
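A hedged sketch of what the OnnxConfig's dynamic axes above are for: when exporting BERT to ONNX, the batch and sequence dimensions are declared symbolic so one exported graph serves any batch size and length. The output file name and opset version are illustrative choices.

import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
encoded = tokenizer("hello world", return_tensors="pt")

# Mirrors the inputs mapping above: axis 0 is batch, axis 1 is sequence.
dynamic_axes = {name: {0: "batch", 1: "sequence"} for name in ("input_ids", "attention_mask", "token_type_ids")}
torch.onnx.export(
    model,
    (encoded["input_ids"], encoded["attention_mask"], encoded["token_type_ids"]),
    "bert-base-uncased.onnx",
    input_names=["input_ids", "attention_mask", "token_type_ids"],
    dynamic_axes=dynamic_axes,
    opset_version=14,
)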
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class a ( a__ , a__ ):
snake_case__ = '''pixel_values'''
snake_case__ = False
snake_case__ = TimmBackboneConfig
def __init__( self , _snake_case , **_snake_case ):
"""simple docstring"""
requires_backends(self , 'timm' )
super().__init__(_snake_case )
lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(_snake_case , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
lowerCAmelCase = getattr(_snake_case , 'use_pretrained_backbone' , _snake_case )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase = config.out_indices if getattr(_snake_case , 'out_indices' , _snake_case ) is not None else (-1,)
lowerCAmelCase = timm.create_model(
config.backbone , pretrained=_snake_case , features_only=config.features_only , in_chans=config.num_channels , out_indices=_snake_case , **_snake_case , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase = self._backbone.return_layers
lowerCAmelCase = {layer['module']: str(_snake_case ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_snake_case )
@classmethod
def UpperCamelCase__ ( cls , _snake_case , *_snake_case , **_snake_case ):
"""simple docstring"""
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
lowerCAmelCase = kwargs.pop('use_timm_backbone' , _snake_case )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
lowerCAmelCase = TimmBackboneConfig(
backbone=_snake_case , num_channels=_snake_case , features_only=_snake_case , use_pretrained_backbone=_snake_case , out_indices=_snake_case , )
return super()._from_config(_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase = self._all_layers
lowerCAmelCase = self._backbone(_snake_case , **_snake_case )
lowerCAmelCase = self._return_layers
lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase = self._backbone(_snake_case , **_snake_case )
lowerCAmelCase = None
lowerCAmelCase = tuple(_snake_case )
lowerCAmelCase = tuple(_snake_case ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase = (feature_maps,)
if output_hidden_states:
lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_snake_case , hidden_states=_snake_case , attentions=_snake_case )
| 4 |
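A sketch of the timm call the wrapper above delegates to; out_indices=(4,) selects only the final stage of a resnet50, which is what the wrapper's (-1,) default amounts to. The model choice here is illustrative.

import timm
import torch

backbone = timm.create_model("resnet50", pretrained=False, features_only=True, out_indices=(4,))
feature_maps = backbone(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in feature_maps])  # [(1, 2048, 7, 7)]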
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( a__ , unittest.TestCase ):
snake_case__ = DanceDiffusionPipeline
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
snake_case__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_snake_case , use_timestep_embedding=_snake_case , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase = IPNDMScheduler()
lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = DanceDiffusionPipeline(**_snake_case )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = self.get_dummy_inputs(_snake_case )
lowerCAmelCase = pipe(**_snake_case )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = torch_device
lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(generator=_snake_case , num_inference_steps=1_00 , audio_length_in_s=4.096 )
lowerCAmelCase = output.audios
lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
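A small sketch of the seeded-generator pattern these pipeline tests rely on for reproducibility: MPS does not support device-bound generators, so the global RNG is seeded instead, exactly as in get_dummy_inputs above. The helper name is illustrative.

import torch

def make_generator(device, seed=0):
    # MPS lacks device-bound generators; fall back to seeding the global RNG.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)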
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a :
def __init__( self , _snake_case , _snake_case=2 , _snake_case=3 , _snake_case=4 , _snake_case=2 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=36 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=6 , _snake_case=6 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = coordinate_size
lowerCAmelCase = shape_size
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase = text_seq_length
lowerCAmelCase = (image_size // patch_size) ** 2 + 1
lowerCAmelCase = self.text_seq_length + self.image_seq_length
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCAmelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = tmp_coordinate
lowerCAmelCase = tf.constant(_snake_case )
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMvaModel(config=_snake_case )
# text + image
lowerCAmelCase = model(_snake_case , pixel_values=_snake_case , training=_snake_case )
lowerCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , training=_snake_case , )
lowerCAmelCase = model(_snake_case , bbox=_snake_case , pixel_values=_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase = model(_snake_case , training=_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase = model({'pixel_values': pixel_values} , training=_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMvaForSequenceClassification(config=_snake_case )
lowerCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , training=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMvaForTokenClassification(config=_snake_case )
lowerCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , training=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = 2
lowerCAmelCase = TFLayoutLMvaForQuestionAnswering(config=_snake_case )
lowerCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , training=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return True
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(_snake_case )
if model_class in get_values(_snake_case ):
lowerCAmelCase = {
k: tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_snake_case , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_snake_case ):
lowerCAmelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_snake_case ):
lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_snake_case ):
lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_snake_case ):
lowerCAmelCase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMvaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
if getattr(_snake_case , 'hf_compute_loss' , _snake_case ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , _snake_case , return_labels=_snake_case )
lowerCAmelCase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_snake_case )[0]
]
lowerCAmelCase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , _snake_case , return_labels=_snake_case )
lowerCAmelCase = prepared_for_class.pop('input_ids' )
lowerCAmelCase = model(_snake_case , **_snake_case )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , _snake_case , return_labels=_snake_case )
lowerCAmelCase = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
lowerCAmelCase = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCAmelCase = -1_00
lowerCAmelCase = tf.convert_to_tensor(_snake_case )
lowerCAmelCase = model(_snake_case , **_snake_case )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , _snake_case , return_labels=_snake_case )
lowerCAmelCase = model(_snake_case )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCAmelCase = self._prepare_for_class(inputs_dict.copy() , _snake_case , return_labels=_snake_case )
# Get keys that were added with the _prepare_for_class function
lowerCAmelCase = prepared_for_class.keys() - inputs_dict.keys()
lowerCAmelCase = inspect.signature(model.call ).parameters
lowerCAmelCase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCAmelCase = {0: 'input_ids'}
for label_key in label_keys:
lowerCAmelCase = signature_names.index(_snake_case )
lowerCAmelCase = label_key
lowerCAmelCase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCAmelCase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCAmelCase = prepared_for_class[value]
lowerCAmelCase = tuple(_snake_case )
# Send to model
lowerCAmelCase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFLayoutLMvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class a ( unittest.TestCase ):
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=_snake_case ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=_snake_case , return_tensors='tf' ).pixel_values
lowerCAmelCase = tf.constant([[1, 2]] )
lowerCAmelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , pixel_values=_snake_case , training=_snake_case )
# verify the logits
lowerCAmelCase = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , _snake_case )
lowerCAmelCase = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-4 ) )
| 4 |
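The bbox-sanitizing loops in the tester above can be expressed vectorized; this sketch sorts each box's coordinate pairs so that x0 <= x1 and y0 <= y1, which is all the element-wise swaps accomplish. The function name is illustrative.

import numpy as np

def sanitize_bboxes(bbox):
    # bbox: (..., 4) as (x0, y0, x1, y1); reorder any pair that is reversed.
    bbox = np.asarray(bbox).copy()
    xs = np.sort(bbox[..., [0, 2]], axis=-1)
    ys = np.sort(bbox[..., [1, 3]], axis=-1)
    bbox[..., 0], bbox[..., 2] = xs[..., 0], xs[..., 1]
    bbox[..., 1], bbox[..., 3] = ys[..., 0], ys[..., 1]
    return bbox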
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase) ,(lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
| 4 | 1 |
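A sketch of the rope_scaling setting the parameterized test exercises, assuming the dict schema this generation of transformers uses ({'type': ..., 'factor': ...}; newer releases may differ). Linear scaling rescales every position, while dynamic scaling only activates beyond the original max length, which is why short inputs match the unscaled model under 'dynamic'.

from transformers import OpenLlamaConfig  # deprecated in recent transformers releases

config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})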
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__UpperCamelCase : Tuple = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
__UpperCamelCase : int = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__UpperCamelCase : str = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__UpperCamelCase : Any = '''allenai'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCAmelCase = dict((re.sub(R'@@$' , '' , _UpperCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCAmelCase ), v) for k, v in d.items() )
lowerCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
lowerCAmelCase = d[k] # restore
return da
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any ):
# prep
assert os.path.exists(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowerCAmelCase = basename(_UpperCAmelCase )
lowerCAmelCase = dirname(_UpperCAmelCase )
lowerCAmelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCAmelCase = cls.hub_models()
lowerCAmelCase = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCAmelCase = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
lowerCAmelCase = hub_utils.from_pretrained(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , archive_map=_UpperCAmelCase , **_UpperCAmelCase )
lowerCAmelCase = vars(chkpt['args']['model'] )
lowerCAmelCase = args['source_lang']
lowerCAmelCase = args['target_lang']
lowerCAmelCase = dirname(_UpperCAmelCase )
lowerCAmelCase = basename(_UpperCAmelCase )
# dicts
lowerCAmelCase = os.path.join(_UpperCAmelCase , F'dict.{src_lang}.txt' )
lowerCAmelCase = os.path.join(_UpperCAmelCase , F'dict.{tgt_lang}.txt' )
lowerCAmelCase = Dictionary.load(_UpperCAmelCase )
lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase = len(_UpperCAmelCase )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'vocab-src.json' )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCAmelCase = True
for k in src_vocab.keys():
if not k.islower():
lowerCAmelCase = False
break
lowerCAmelCase = Dictionary.load(_UpperCAmelCase )
lowerCAmelCase = rewrite_dict_keys(tgt_dict.indices )
lowerCAmelCase = len(_UpperCAmelCase )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'vocab-tgt.json' )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) )
# merges_file (bpecodes)
lowerCAmelCase = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ):
break
with open(_UpperCAmelCase , encoding='utf-8' ) as fin:
lowerCAmelCase = fin.read()
lowerCAmelCase = re.sub(R' \d+$' , '' , _UpperCAmelCase , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCAmelCase )
# model config
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowerCAmelCase = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCAmelCase = 5
lowerCAmelCase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCAmelCase = best_score_hparams[model_dir]['length_penalty']
else:
lowerCAmelCase = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) )
# tokenizer config
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) )
# model
lowerCAmelCase = chkpt['models'][0]
lowerCAmelCase = model.state_dict()
# rename keys to start with 'model.'
lowerCAmelCase = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCAmelCase = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = FSMTConfig.from_pretrained(_UpperCAmelCase )
lowerCAmelCase = FSMTForConditionalGeneration(_UpperCAmelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
# save
lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 4 |
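A de-obfuscated sketch of the BPE dictionary rewrite performed by the conversion script above: strip the '@@' continuation marker, append '</w>' to word-final tokens, then restore the four special tokens to their plain form. The function name is illustrative.

import re

def rewrite_bpe_dict(d):
    # 'le@@' -> 'le' (word continues); 'er' -> 'er</w>' (word ends here).
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
    for k in "<s> <pad> </s> <unk>".split():  # special tokens keep their plain keys
        if k + "</w>" in out:
            out[k] = out.pop(k + "</w>")
    return out

print(rewrite_bpe_dict({"le@@": 5, "tt@@": 6, "er": 7}))  # {'le': 5, 'tt': 6, 'er</w>': 7}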
"""simple docstring"""
from typing import Any
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = data
lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return F'Node({self.data})'
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase = self.head
while node:
yield node.data
lowerCAmelCase = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
def __getitem__( self , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase = self.head
for _ in range(_snake_case ):
lowerCAmelCase = current.next
lowerCAmelCase = data
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(len(self ) , _snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
self.insert_nth(0 , _snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase = Node(_snake_case )
if self.head is None:
lowerCAmelCase = new_node
elif index == 0:
lowerCAmelCase = self.head # link new_node to head
lowerCAmelCase = new_node
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = new_node
def UpperCamelCase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase__ ( self , _snake_case = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase = self.head # default first node
if index == 0:
lowerCAmelCase = self.head.next
else:
lowerCAmelCase = self.head
for _ in range(index - 1 ):
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next
lowerCAmelCase = temp.next.next
return delete_node.data
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.head is None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = None
lowerCAmelCase = self.head
while current:
# Store the current node's next node.
lowerCAmelCase = current.next
# Make the current node's next point backwards
lowerCAmelCase = prev
# Make the previous node be the current node
lowerCAmelCase = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase = next_node
# Return prev in order to put the head at the end
lowerCAmelCase = prev
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(_UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_UpperCAmelCase ) == i
linked_list.insert_nth(_UpperCAmelCase , i + 1 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_UpperCAmelCase ) == 9
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(-8 , 1 ) )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(_UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase = linked_list.delete_head()
assert result == -9
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(_UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_UpperCAmelCase )
assert (
str(_UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _SCREAMING_SNAKE_CASE ():
from doctest import testmod
testmod()
lowerCAmelCase = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_UpperCAmelCase )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
lowerCAmelCase = input('Enter New Value: ' ).strip()
print('New list:' )
print(_UpperCAmelCase )
print(F'length of linked_list is : {len(_UpperCAmelCase )}' )
if __name__ == "__main__":
main()
| 4 | 1 |
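A short usage sketch for the LinkedList class above (assumed to be in scope), showing the in-place reversal that reverse() implements with the classic three-pointer walk.

llist = LinkedList()
for value in (1, 2, 3):
    llist.insert_tail(value)
print(llist)   # 1->2->3
llist.reverse()
print(llist)   # 3->2->1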