import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # Handle cases where a new placeholder token contains an existing placeholder token but is larger.
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
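A minimal usage sketch for the tokenizer above; the checkpoint id and placeholder name below are illustrative assumptions, not part of the original snippet:

# Illustrative only: checkpoint id and placeholder name are assumptions.
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
# "<my-concept>" is expanded to "<my-concept>_0 ... <my-concept>_3" before encoding.
input_ids = tokenizer("a photo of <my-concept>").input_ids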
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check is_small_dataset against the configured in-memory size limit."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text in `filename` between `start_prompt` and `end_prompt`, plus its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased identifier into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a cell of the given `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the big model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the library state; optionally overwrite it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a pn-junction diode."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
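The function implements the standard built-in potential formula V_bi = (kT/q) * ln(N_d * N_a / n_i^2). A quick worked example with values typical of silicon at 300 K (the concentrations below are illustrative):

# Typical silicon values (cm^-3); kT/q ≈ 0.02585 V at 300 K.
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
assert abs(v_bi - 0.81) < 0.01  # ≈ 0.81 V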
def upper(word: str) -> str:
    """Convert an ASCII string to uppercase without using str.upper()."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with a fixed number of priorities; 0 is the highest."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
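Design note: `queue.pop(0)` and the `min()`/`remove()` pair above are both O(n) per dequeue. For larger queues, the standard-library heap gives O(log n) operations; a minimal sketch:

import heapq

heap: list[int] = []
for item in (10, 70, 100, 1, 5):
    heapq.heappush(heap, item)
assert heapq.heappop(heap) == 1  # smallest element comes out first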
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given its coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial using Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
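evaluate_poly recomputes x**i for every term, while Horner's rule factors the polynomial as (...(c_n*x + c_{n-1})*x + ...) + c_0 and needs only one multiply and one add per coefficient. A quick agreement check:

p, x_val = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
assert abs(evaluate_poly(p, x_val) - horner(p, x_val)) < 1e-9  # both ≈ 79800.0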
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort and return it."""
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # Even passes compare pairs starting at index 0, odd passes at index 1.
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(list(arr))}")
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTests(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        test_failures[i][0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: torch.Tensor = None
        self.schedule: torch.Tensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
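For reference, the schedule built in set_timesteps interpolates geometrically between the squared sigma bounds, following the stochastic sampler of Karras et al. (2022). A standalone sketch of the same formula (default config values assumed):

N, sigma_min, sigma_max = 50, 0.02, 100.0
schedule = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in range(N)]
assert abs(schedule[0] - sigma_max**2) < 1e-9 and abs(schedule[-1] - sigma_min**2) < 1e-9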
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
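For non-negative integers the function is equivalent to the built-in int.bit_length():

for n in (0, 1, 5, 17, 1 << 20):
    assert get_highest_set_bit_position(n) == n.bit_length()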
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the transformers structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build and measure an entangled (GHZ) state on the given number of qubits."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
def solution(limit: int = 50000000) -> int:
    """
    Count the integers below `limit` expressible as the sum of a prime square,
    a prime cube and a prime fourth power (Project Euler problem 87).
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in increasing order so the early `break`s below are valid.
    sorted_primes = sorted(primes)
    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
def check_bouncy(n: int) -> bool:
    """Return True if `n` is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values `vl`, weights `wt`, capacity `w`, `n` items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
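A worked example with the classic fractional-knapsack instance (values 60/100/120, weights 10/20/30, capacity 50): the greedy-by-ratio optimum takes the first two items whole plus two thirds of the last, for a value of 240.

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0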
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
fast27_timesteps = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
smart185_timesteps = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
super27_timesteps = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
super40_timesteps = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
super100_timesteps = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
def actual_power(a: int, b: int) -> int:
    """Compute a**b for non-negative b by divide and conquer."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b for any integer b, handling negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
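actual_power squares the half-exponent result, so the recursion depth is O(log b); note that because both recursive calls are evaluated separately rather than cached, the total number of multiplications is O(b), and storing the half result once would bring it down to O(log b). Quick checks:

assert power(2, 10) == 1024
assert power(-2, -3) == -0.125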
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, False otherwise."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def snake_case ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def snake_case ( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def snake_case ( self ,scaling_type ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] ,config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1E-5 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,SCREAMING_SNAKE_CASE_ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,SCREAMING_SNAKE_CASE_ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,SCREAMING_SNAKE_CASE_ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out.mean(-1 ) ,SCREAMING_SNAKE_CASE_ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) ,SCREAMING_SNAKE_CASE_ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,SCREAMING_SNAKE_CASE_ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        EXPECTED_TEXT_COMPLETION = SCREAMING_SNAKE_CASE_
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt ,return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids ,max_new_tokens=64 ,top_p=None ,temperature=1 ,do_sample=False )
        text = tokenizer.decode(generated_ids[0] ,skip_special_tokens=True )
        self.assertEqual(text ,EXPECTED_TEXT_COMPLETION )
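# Hedged smoke-test sketch for the gated checkpoints exercised above (assumes
# access to the meta-llama Hub repos has been granted; not part of the test
# suite itself):
# tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf' )
# model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
# inputs = tokenizer('Hello' ,return_tensors='pt' )
# print(tokenizer.decode(model.generate(**inputs ,max_new_tokens=8 )[0] ) )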
| 685 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def solution( ) -> int:
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.dirname(os.path.realpath(__file__ ) )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , 'words.txt' )
    with open(SCREAMING_SNAKE_CASE_ ) as f:
        SCREAMING_SNAKE_CASE_ : int = f.readline()
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = [word.strip('"' ) for word in SCREAMING_SNAKE_CASE_.strip('\r\n' ).split(',' )]
    SCREAMING_SNAKE_CASE_ : Tuple = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in SCREAMING_SNAKE_CASE_]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(SCREAMING_SNAKE_CASE_ )
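# Hedged illustration of the scoring rule above: "SKY" maps to
# 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a triangle word.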
if __name__ == "__main__":
print(solution())
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    """simple docstring"""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCAmelCase_ ( PreTrainedTokenizer ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,emoji_file ,unk_token="<|endoftext|>" ,pad_token="<|endoftext|>" ,bos_token="<|startoftext|>" ,eos_token="<|endoftext|>" ,do_clean_text=False ,**kwargs ,):
        super().__init__(
            unk_token=unk_token ,pad_token=pad_token ,bos_token=bos_token ,eos_token=eos_token ,do_clean_text=do_clean_text ,**kwargs ,)
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file ,emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab ,**self.added_tokens_encoder )
    def _tokenize( self ,text ):
        return self.subword_tokenizer.tokenize(text ,clean=self.do_clean_text )
    def _convert_token_to_id( self ,token ):
        return self.vocab.get(token ,self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self ,index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self ,tokens ):
        SCREAMING_SNAKE_CASE_ : str = ''.join(tokens ).strip()
        return SCREAMING_SNAKE_CASE_
    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer:
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\u3000' ,'<SP>' )
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates ,key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
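# Hedged usage sketch for the tokenizer defined above (upstream it is called
# GPTNeoXJapaneseTokenizer; here the class keeps the placeholder name
# lowerCAmelCase_, and loading assumes access to the Hub checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP):
# tokenizer = lowerCAmelCase_.from_pretrained('abeja/gpt-neox-japanese-2.7b' )
# ids = tokenizer('こんにちは、世界' )['input_ids']
# print(tokenizer.decode(ids ) )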
| 685 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
    def __init__( self ,parent ,batch_size=2 ,image_size=32 ,patch_size=16 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=4 ,backbone_out_indices=[0, 1, 2, 3] ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,initializer_range=0.02 ,num_labels=3 ,backbone_featmap_shape=[1, 384, 24, 24] ,is_hybrid=True ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=backbone_config ,backbone_featmap_shape=self.backbone_featmap_shape ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
        config , pixel_values , labels = SCREAMING_SNAKE_CASE_
        SCREAMING_SNAKE_CASE_ : Any = {'pixel_values': pixel_values}
        return config, SCREAMING_SNAKE_CASE_
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=DPTConfig ,has_text_modality=False ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def snake_case ( self ):
pass
def snake_case ( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def snake_case ( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def snake_case ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def snake_case ( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
pass
@slow
def snake_case ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            SCREAMING_SNAKE_CASE_ : List[str] = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def snake_case ( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError ):
            SCREAMING_SNAKE_CASE_ : int = DPTForDepthEstimation(config )
def prepare_img( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return SCREAMING_SNAKE_CASE_
@require_torch
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,expected_slice ,atol=1E-4 ) )
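# Hedged post-processing sketch (not part of the test above): upsample the
# predicted depth back to the input resolution before visualizing it.
# prediction = torch.nn.functional.interpolate(
#     predicted_depth.unsqueeze(1 ) ,size=image.size[::-1] ,mode='bicubic' ,align_corners=False )
# depth_map = prediction.squeeze().cpu().numpy()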
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( function , args=() , num_processes=None , mixed_precision='no' , use_port='29500' ):
    """simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(SCREAMING_SNAKE_CASE_ , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(SCREAMING_SNAKE_CASE_ , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
        if is_mps_available():
            os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
        function(*args )
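# Hedged usage sketch (upstream this launcher is `notebook_launcher`; the
# training function and its arguments are user-supplied, and `num_processes=2`
# assumes a 2-GPU machine):
# def training_loop(mixed_precision='fp16' ):
#     ...
# __UpperCAmelCase(training_loop ,args=('fp16',) ,num_processes=2 )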
def __UpperCAmelCase ( function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(function , debug=True )
            start_processes(SCREAMING_SNAKE_CASE_ , args=args , nprocs=num_processes , start_method='fork' )
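# Hedged usage sketch: this debug launcher spawns CPU-only subprocesses, which
# is handy for unit-testing distributed code paths without GPUs:
# __UpperCAmelCase(some_test_fn ,args=() ,num_processes=2 )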
| 685 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    """simple docstring"""
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images' , total=num_class_images )
    with open(F'{class_data_dir}/caption.txt' , 'w' ) as fa, open(F'{class_data_dir}/urls.txt' , 'w' ) as fb, open(
        F'{class_data_dir}/images.txt' , 'w' ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'] )
                if img.status_code == 2_00:
                    SCREAMING_SNAKE_CASE_ : List[str] = Image.open(BytesIO(img.content ) )
                    with open(F'{class_data_dir}/images/{total}.jpg' , 'wb' ) as f:
                        f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fb.write(images['url'] + '\n' )
                    fc.write(F'{class_data_dir}/images/{total}.jpg' + '\n' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
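# Hedged usage sketch (assumes `clip-retrieval` is installed and the LAION
# knn service is reachable):
# retrieve('photo of a dog' ,'class_data/dog' ,num_class_images=2_00 )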
def parse_args( ):
    """simple docstring"""
    parser = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase__ : Any = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase__)
| 685 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node :
    def __init__( self ,value = None ):
        self.value = value
        self.parent = None # Added in order to delete a node easier
        self.left = None
        self.right = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'{self.value}': (self.left, self.right)} ,indent=1 )
class BinarySearchTree :
    def __init__( self ,root = None ):
        self.root = root
def __str__( self ):
return str(self.root )
    def __reassign_nodes( self ,node ,new_children ):
        if new_children is not None: # reset its kids
            new_children.parent = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(node ): # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self ,node ):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        return self.root is None
    def __insert( self ,value ):
        new_node = Node(value ) # create a new Node
        if self.empty(): # if Tree is empty
            self.root = new_node # set its root
        else: # Tree is not empty
            parent_node = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self ,*values ):
        for value in values:
            self.__insert(value )
    def search( self ,value ):
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self ,node = None ):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self ,node = None ):
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self ,value ):
        node = self.search(value ) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(node ,None )
            elif node.left is None: # Has only right children
                self.__reassign_nodes(node ,node.right )
            elif node.right is None: # Has only left children
                self.__reassign_nodes(node ,node.left )
            else:
                tmp_node = self.get_max(
                    node.left ) # Gets the max value of the left branch
                self.remove(tmp_node.value ) # type: ignore
                node.value = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self ,node ):
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self ,traversal_function=None ):
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self ,arr ,node ):
        if node:
            self.inorder(arr ,node.left )
            arr.append(node.value )
            self.inorder(arr ,node.right )
    def find_kth_smallest( self ,k ,node ):
        SCREAMING_SNAKE_CASE_ : Dict = []
        self.inorder(SCREAMING_SNAKE_CASE_ ,node ) # append all values to list using inorder traversal
        return SCREAMING_SNAKE_CASE_[k - 1]
def postorder( curr_node ) -> Any:
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ : str = []
    if curr_node is not None:
        SCREAMING_SNAKE_CASE_ : List[Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return SCREAMING_SNAKE_CASE_
def __UpperCAmelCase ( ):
    """simple docstring"""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
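# Hedged usage sketch for find_kth_smallest (inorder traversal visits values
# in sorted order, so index k - 1 is the k-th smallest):
# t = BinarySearchTree()
# t.insert(8, 3, 6, 1, 10)
# assert t.find_kth_smallest(2, t.root) == 3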
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
        vocab_tokens = dict(zip(SCREAMING_SNAKE_CASE_ ,range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def snake_case ( self ):
        tokenizer = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s ,tokens_r )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s ,tokens_r )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
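# Hedged round-trip sketch of what the tests above exercise (assumes Hub
# access; CLIP decoding lower-cases and re-spaces, so exact equality is not
# guaranteed for arbitrary input):
# tok = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
# print(tok.decode(tok('lower newer' )['input_ids'] ,skip_special_tokens=True ) )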
| 685 | 0 |
def sum_of_digits( n ) -> int:
    """simple docstring"""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( n ) -> int:
    """simple docstring"""
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact( n ) -> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(n ) ) )
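# Hedged sanity check: all three variants agree, e.g.
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15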
def benchmark( ) -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func ,value ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' ,setup='import __main__' )
        print(F'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func ,value )
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main( ):
    """simple docstring"""
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
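# Hedged CLI sketch (run from a shell once transformers is installed):
#   transformers-cli env
#   transformers-cli download bert-base-uncased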
if __name__ == "__main__":
main()
| 685 | 0 |
from __future__ import annotations
def __UpperCAmelCase ( number_of_bytes : int , partitions : int ) -> list[str]:
    """simple docstring"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F'{start_bytes}-{end_bytes}' )
    return allocation_list
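# Hedged illustration: 100 bytes split over 4 partitions yields
# ['1-25', '26-50', '51-75', '76-100']; the final range absorbs any remainder.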
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions ( ):
    """simple docstring"""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url ).read() )['releases'].keys()
    return sorted(releases ,key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def init_hf_modules ( ):
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE ,exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module ( name : Union[str, os.PathLike] ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path ,exist_ok=True )
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
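# Layout sketch (paths follow from the constants above): create_dynamic_module('diffusers_modules/git')
# ends up creating <HF_MODULES_CACHE>/diffusers_modules/__init__.py and
# <HF_MODULES_CACHE>/diffusers_modules/git/__init__.py, so the cache is importable as a package.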
def get_relative_imports ( module_file ):
    """simple docstring"""
    with open(module_file ,'r' ,encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$' ,content ,flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' ,content ,flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
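# Example (hypothetical file contents): a module containing
#   import torch
#   from .pipeline_utils import X
# yields ['pipeline_utils']; absolute imports such as `torch` are ignored here.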
def get_relative_import_files ( module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'{f}.py' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports ( filename ):
    """simple docstring"""
    with open(filename ,'r' ,encoding='utf-8' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$' ,content ,flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' ,content ,flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )
    return get_relative_imports(filename )
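# Note: for the same hypothetical file, this also returns ['pipeline_utils'], but only
# after confirming that every absolute import (e.g. `torch`) is installed; otherwise an
# ImportError with a ready-made `pip install` hint is raised.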
def get_class_in_module ( class_name ,module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep ,'.' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module ,class_name )
def find_pipeline_class ( loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module ,inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls ,DiffusionPipeline )
            and cls.__module__.split('.' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file (
    pretrained_model_name_or_path : Union[str, os.PathLike] ,
    module_file : str ,
    cache_dir : Optional[Union[str, os.PathLike]] = None ,
    force_download : bool = False ,
    resume_download : bool = False ,
    proxies : Optional[Dict[str, str]] = None ,
    use_auth_token : Optional[Union[bool, str]] = None ,
    revision : Optional[str] = None ,
    local_files_only : bool = False ,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path ,module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F'Defaulting to latest_version: {revision}.' )
        elif revision in available_versions:
            revision = F'v{revision}'
        elif revision != "main":
            raise ValueError(
                F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                F' {", ".join(available_versions + ["main"] )}.' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision ,pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=False ,)
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path ,module_file ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=use_auth_token ,)
            submodule = os.path.join('local' ,'--'.join(pretrained_model_name_or_path.split('/' ) ) )
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file ,submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path ,module_needed ) ,submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token ,str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path ,revision=revision ,token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file ,submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path ,F'{module_needed}.py' ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    return os.path.join(full_submodule ,module_file )
def get_class_from_dynamic_module ( pretrained_model_name_or_path : Union[str, os.PathLike] ,module_file : str ,class_name : Optional[str] = None ,cache_dir : Optional[Union[str, os.PathLike]] = None ,force_download : bool = False ,resume_download : bool = False ,proxies : Optional[Dict[str, str]] = None ,use_auth_token : Optional[Union[bool, str]] = None ,revision : Optional[str] = None ,local_files_only : bool = False ,**kwargs ,):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path ,module_file ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    return get_class_in_module(class_name ,final_module.replace('.py' ,'' ) )
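# Usage sketch (pipeline name used for illustration): resolving a community pipeline by file
#   cls = get_class_from_dynamic_module('clip_guided_stable_diffusion', 'clip_guided_stable_diffusion.py')
# downloads the file from the `examples/community` folder pinned by COMMUNITY_PIPELINES_URL,
# caches it under HF_MODULES_CACHE, and returns the single DiffusionPipeline subclass it defines.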
| 685 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCamelCase__ : str = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ : str = '''RegNetConfig'''
# Base docstring
UpperCamelCase__ : List[str] = '''facebook/regnet-y-040'''
UpperCamelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCamelCase__ : Dict = '''facebook/regnet-y-040'''
UpperCamelCase__ : List[str] = '''tabby, tabby cat'''
UpperCamelCase__ : Any = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ = 3 ,snake_case__ = 1 ,snake_case__ = 1 ,snake_case__ = "relu" ,**snake_case__ ,):
super().__init__(**snake_case__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE_ : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ : Any = tf.keras.layers.ConvaD(
filters=snake_case__ ,kernel_size=snake_case__ ,strides=snake_case__ ,padding='VALID' ,groups=snake_case__ ,use_bias=snake_case__ ,name='convolution' ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization' )
SCREAMING_SNAKE_CASE_ : int = ACTaFN[activation] if activation is not None else tf.identity
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.convolution(self.padding(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : int = self.normalization(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.activation(snake_case__ )
return hidden_state
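# Shape sketch (values assumed for illustration): with kernel_size=3 the ZeroPadding2D above
# pads 1 pixel per side, so a stride-2 'VALID' convolution maps
# (batch, H, W, C) -> (batch, ceil(H / 2), ceil(W / 2), filters).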
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = config.num_channels
SCREAMING_SNAKE_CASE_ : int = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,)
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = shape_list(snake_case__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE_ : str = tf.transpose(snake_case__ ,perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : Dict = self.embedder(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ = 2 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tf.keras.layers.ConvaD(
filters=snake_case__ ,kernel_size=1 ,strides=snake_case__ ,use_bias=snake_case__ ,name='convolution' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization' )
def snake_case ( self ,snake_case__ ,snake_case__ = False ):
return self.normalization(self.convolution(snake_case__ ) ,training=snake_case__ )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ ,name='pooler' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=snake_case__ ,kernel_size=1 ,activation='relu' ,name='attention.0' ),
tf.keras.layers.ConvaD(filters=snake_case__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2' ),
]
def snake_case ( self ,snake_case__ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pooler(snake_case__ )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE_ : List[Any] = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Tuple = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : Any = (
TFRegNetShortCut(snake_case__ ,stride=snake_case__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE_ : Any = [
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
snake_case__ ,stride=snake_case__ ,groups=snake_case__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=snake_case__ ,name='layer.2' ),
]
SCREAMING_SNAKE_CASE_ : Dict = ACTaFN[config.hidden_act]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Any = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Any = self.activation(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 1 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Tuple = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : List[str] = (
TFRegNetShortCut(snake_case__ ,stride=snake_case__ ,name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' ,name='shortcut' )
)
SCREAMING_SNAKE_CASE_ : List[str] = [
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0' ),
TFRegNetConvLayer(
snake_case__ ,stride=snake_case__ ,groups=snake_case__ ,activation=config.hidden_act ,name='layer.1' ),
TFRegNetSELayer(snake_case__ ,reduced_channels=int(round(in_channels / 4 ) ) ,name='layer.2' ),
TFRegNetConvLayer(snake_case__ ,kernel_size=1 ,activation=snake_case__ ,name='layer.3' ),
]
SCREAMING_SNAKE_CASE_ : int = ACTaFN[config.hidden_act]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Dict = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Tuple = self.activation(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 2 ,snake_case__ = 2 ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ : str = [
# downsampling is done in the first layer with stride of 2
layer(snake_case__ ,snake_case__ ,snake_case__ ,stride=snake_case__ ,name='layers.0' ),
*[layer(snake_case__ ,snake_case__ ,snake_case__ ,name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def snake_case ( self ,snake_case__ ):
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_module(snake_case__ )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,) )
SCREAMING_SNAKE_CASE_ : Any = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case__ ,snake_case__ ,snake_case__ ,depth=snake_case__ ,name=F'stages.{i+1}' ) )
def snake_case ( self ,snake_case__ ,snake_case__ = False ,snake_case__ = True ):
SCREAMING_SNAKE_CASE_ : Optional[int] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ : int = stage_module(snake_case__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ ,hidden_states=snake_case__ )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
__a : Tuple = RegNetConfig
def __init__( self ,snake_case__ ,**snake_case__ ):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = config
SCREAMING_SNAKE_CASE_ : Any = TFRegNetEmbeddings(snake_case__ ,name='embedder' )
SCREAMING_SNAKE_CASE_ : Dict = TFRegNetEncoder(snake_case__ ,name='encoder' )
SCREAMING_SNAKE_CASE_ : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ ,name='pooler' )
@unpack_inputs
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Tuple = self.embedder(snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.encoder(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ : int = self.pooler(snake_case__ )
# Change to NCHW output format have uniformity in the modules
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ : Dict = tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple([tf.transpose(snake_case__ ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case__ ,pooler_output=snake_case__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = RegNetConfig
__a : Dict = "regnet"
__a : Any = "pixel_values"
@property
def snake_case ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) ,dtype=tf.floataa )}
UpperCamelCase__ : Any = r'''
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase__ : List[str] = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
super().__init__(snake_case__ ,*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFRegNetMainLayer(snake_case__ ,name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__=False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[Any] = self.regnet(
pixel_values=snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
def __init__( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
super().__init__(snake_case__ ,*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = config.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = TFRegNetMainLayer(snake_case__ ,name='regnet' )
# classification head
SCREAMING_SNAKE_CASE_ : List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=snake_case__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def snake_case ( self ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__=False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : int = self.regnet(
snake_case__ ,output_hidden_states=snake_case__ ,return_dict=snake_case__ ,training=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ : List[str] = self.classifier[0](snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.classifier[1](snake_case__ )
SCREAMING_SNAKE_CASE_ : int = None if labels is None else self.hf_compute_loss(labels=snake_case__ ,logits=snake_case__ )
if not return_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case__ ,logits=snake_case__ ,hidden_states=outputs.hidden_states )
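# Usage sketch (checkpoint name taken from the docstrings above; preprocessing assumed):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
#   model = TFRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#   logits = model(**processor(image, return_tensors='tf')).logits  # shape (1, num_labels)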
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,visual_embedding_dim=512 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,bypass_transformer=False ,special_visual_initialize=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
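# Usage sketch: VisualBertConfig() builds the default configuration; pass e.g.
# visual_embedding_dim=2048 (value assumed here for illustration) when the visual
# features come from a region detector with wider feature vectors.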
| 685 | 0 |
def solution ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
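# Example: solution(10) == 23 (3 + 5 + 6 + 9), i.e. the sum of all multiples of 3 or 5 below 10.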
if __name__ == "__main__":
print(F"""{solution() = }""")
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator ( iterations : int ) -> None:
    """simple docstring"""
    def is_in_circle(x : float ,y : float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) ,uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The numpy value of pi is {pi}' )
    print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator ( iterations : int ,function_to_integrate : Callable[[float], float] ,min_value : float = 0.0 ,max_value : float = 1.0 ,) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value ,max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check ( iterations : int ,min_value : float = 0.0 ,max_value : float = 1.0 ) -> None:
    """simple docstring"""
    def identity_function(x : float ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations ,identity_function ,min_value ,max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************' )
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {expected_value}' )
    print(F'Total error is {abs(estimated_value - expected_value )}' )
    print('******************' )
def pi_estimator_using_area_under_curve ( iterations : int ) -> None:
    """simple docstring"""
    def function_to_integrate(x : float ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations ,function_to_integrate ,0.0 ,2.0 )
    print('******************' )
    print('Estimating pi using area_under_curve_estimator' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {pi}' )
    print(F'Total error is {abs(estimated_value - pi )}' )
    print('******************' )
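# Note: as with all Monte Carlo estimators above, the error shrinks roughly as
# 1 / sqrt(iterations), so each extra decimal digit of precision costs about 100x more samples.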
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=128 ,snake_case__=32 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : Dict = use_input_mask
SCREAMING_SNAKE_CASE_ : str = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : int = type_vocab_size
SCREAMING_SNAKE_CASE_ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE_ : Tuple = num_choices
SCREAMING_SNAKE_CASE_ : List[Any] = scope
def snake_case ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = NezhaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[Any] = NezhaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = NezhaForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = NezhaForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,next_sentence_label=snake_case__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = NezhaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,start_positions=snake_case__ ,end_positions=snake_case__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = NezhaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self.num_choices
SCREAMING_SNAKE_CASE_ : List[str] = NezhaForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Any = model(
snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,labels=snake_case__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__a : str = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Optional[Any] = True
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[Any] = super()._prepare_for_class(snake_case__ ,snake_case__ ,return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=snake_case__ )
return inputs_dict
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = NezhaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case__ )
def snake_case ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def snake_case ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = NezhaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(config=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = torch.jit.trace(
snake_case__ ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case__ ,os.path.join(snake_case__ ,'bert.pt' ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.jit.load(os.path.join(snake_case__ ,'bert.pt' ) ,map_location=snake_case__ )
loaded(inputs_dict['input_ids'].to(snake_case__ ) ,inputs_dict['attention_mask'].to(snake_case__ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1E-4 ) )
@slow
def snake_case ( self ):
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1E-4 ) )
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
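# Conversion utilities: map a ViT-MSN (Masked Siamese Networks) checkpoint onto the
# Hugging Face ViTMSNModel parameter layout.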
def create_rename_keys(config, base_model=False):
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
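        # query: the first hidden_size rows of the fused qkv matrix (and bias entries)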
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
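        # key: the middle hidden_size rows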
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
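        # value: the last hidden_size rows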
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k ,None )
def remove_projection_head(state_dict):
    """simple docstring"""
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 10_00
    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id ,filename ) ,'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location='cpu' )['target_encoder']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config ,base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url ,stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image ,return_tensors='pt' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] ,expected_slice ,atol=1E-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
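# Helpers for the HANS (Heuristic Analysis for NLI Systems) diagnostic dataset:
# example/feature containers plus PyTorch and TensorFlow dataset wrappers.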
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset ):
__a : List[InputFeatures]
    def __init__( self ,data_dir ,tokenizer ,task ,max_seq_length = None ,overwrite_cache=False ,evaluate = False ,):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir ,'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(max_seq_length ) ,task ,) ,)
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(F'Loading features from cached file {cached_features_file}' )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(F'Creating features from dataset file at {data_dir}' )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info('Training examples: %s' ,len(examples ) )
                self.features = hans_convert_examples_to_features(examples ,label_list ,max_seq_length ,tokenizer )
                logger.info('Saving features into cached file %s' ,cached_features_file )
                torch.save(self.features ,cached_features_file )
def __len__( self ):
return len(self.features )
    def __getitem__( self ,i ):
        return self.features[i]
    def get_labels( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
__a : List[InputFeatures]
    def __init__( self ,data_dir ,tokenizer ,task ,max_seq_length = 128 ,overwrite_cache=False ,evaluate = False ,):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples ,label_list ,max_seq_length ,tokenizer )
        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
                if ex_index % 10000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(examples )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen ,(
                {
                    'example_id': tf.int32,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
                },
                tf.int64,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
    def get_dataset( self ):
return self.dataset
def __len__( self ):
return len(self.features )
    def __getitem__( self ,i ):
        return self.features[i]
    def get_labels( self ):
return self.label_list
class HansProcessor(DataProcessor ):
    def get_train_examples( self ,data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir ,'heuristics_train_set.txt' ) ) ,'train' )
    def get_dev_examples( self ,data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
    def get_labels( self ):
return ["contradiction", "entailment", "neutral"]
    def _create_examples( self ,lines ,set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid ,text_a=text_a ,text_b=text_b ,label=label ,pairID=pairID ) )
return examples
def hans_convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) -> List[InputFeatures]:
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
hans_tasks_num_labels = {
'''hans''': 3,
}
hans_processors = {
'''hans''': HansProcessor,
}
| 685 | 0 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
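# Project Euler problem 8: find the greatest product of thirteen adjacent digits
# in the 1000-digit number above.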
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
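# Exercises is_small_dataset() across a grid of dataset sizes and IN_MEMORY_MAX_SIZE
# settings; the "default" case leaves the config value untouched.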
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
| 685 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory(args: Namespace):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase_ ( lowerCamelCase_ ):
@staticmethod
    def register_subcommand(parser ):
        train_parser = parser.add_parser(
'convert' ,help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' ,)
train_parser.add_argument(
            '--tfds_path' ,type=str ,required=True ,help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' ,)
train_parser.add_argument(
            '--datasets_directory' ,type=str ,required=True ,help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self ,tfds_path ,datasets_directory ,*args ):
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
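        # utils_files: converted helper scripts to relocate at the end; with_manual_update:
        # outputs needing hand edits; imports_to_builder_map: tfds import name -> dataset dir.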
if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
else:
            file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}' )
            input_file = os.path.join(abs_tfds_path ,f_name )
            output_file = os.path.join(abs_datasets_path ,f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
            with open(input_file ,encoding='utf-8' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
                    out_line = ''
continue
elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' ,'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line ,TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern ,replacement ,out_line )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                        match = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' ,out_line )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                        out_line = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dataset_name = f_name.replace('.py' ,'' )
                output_dir = os.path.join(abs_datasets_path ,dataset_name )
                output_file = os.path.join(output_dir ,f_name )
                os.makedirs(output_dir ,exist_ok=True )
self._logger.info(F'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
                utils_files.append(output_file )
if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file ,'w' ,encoding='utf-8' ) as f:
                f.writelines(out_lines )
self._logger.info(F'Converted in {output_file}' )
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' ,'' )]
                self._logger.info(F'Moving {utils_file} to {dest_folder}' )
                shutil.copy(utils_file ,dest_folder )
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
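# Built-in potential of a p-n junction: V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2);
# dividing by the electron-volt constant converts joules to volts.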
T = 3_00 # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
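# Example (hypothetical silicon-like values, concentrations in cm^-3): donor_conc=1e17,
# acceptor_conc=1e17, intrinsic_conc=1.5e10 gives roughly 0.81 V at 300 K.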
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
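# vocab.txt stores one entry per line; a comma-separated line groups surface variants
# that map to the same token id (see load_vocab_and_emoji below).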
def load_vocab_and_emoji(vocab_file, emoji_file):
    """simple docstring"""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
    def vocab_size( self ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
    def get_vocab( self ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
    def _tokenize( self ,text ):
        return self.subword_tokenizer.tokenize(text ,clean=self.do_clean_text )
    def _convert_token_to_id( self ,token ):
        return self.vocab.get(token ,self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self ,index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self ,tokens ):
        out_string = ''.join(tokens ).strip()
return out_string
    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ' ,'<SP>' )
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates ,key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        for word_id in index:
            word = self.ids_to_tokens[word_id][0]
            if word[:6] == "<|byte" and word[-2:] == "|>":
                byte_tokens.append(int(word[6:-2] ) )
            else:
                if len(byte_tokens ) > 0:
                    words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                    byte_tokens = []
                if word[:7] == "<|emoji" and word[-2:] == "|>":
                    words.append(self.emoji['emoji_inv'][word] )
                elif word == "<SP>":
                    words.append(' ' )
                elif word == "<BR>":
                    words.append(breakline )
                elif word == "<TAB>":
                    words.append('\t' )
                elif word == "<BLOCK>":
                    words.append('▀' )
                elif word == "<KIGOU>":
                    words.append('ǀ' )
                elif word == "<U2000U2BFF>":
                    words.append('‖' )
                else:
                    words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
return text
| 707 |
class OverFlowError(Exception ):
    pass
class UnderFlowError(Exception ):
    pass
class FixedPriorityQueue:
def __init__( self ):
        self.queues = [
[],
[],
[],
]
    def enqueue( self ,priority ,data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverflowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
def __init__( self ):
        self.queue = []
    def enqueue( self ,data ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
            data = min(self.queue )
            self.queue.remove(data )
return data
def __str__( self ):
return str(self.queue )
def fixed_priority_queue() -> None:
"""simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
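    # Only nine items were enqueued, so the tenth dequeue below raises UnderFlowError
    # (in the original doctest version this exception is the expected output).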
print(fpq.dequeue() )
def element_priority_queue() -> None:
"""simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
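    # As with the fixed queue above, nine enqueues precede ten dequeues, so the final
    # call raises UnderFlowError.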
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
    def __init__( self ,length = 101 ):
        self.length = length
def __len__( self ):
return self.length
    def __getitem__( self ,i ):
return i
class DummyDataCollator:
def __call__( self ,snake_case__ ):
return {"input_ids": torch.tensor(snake_case__ ), "labels": torch.tensor(snake_case__ )}
class DummyModel(nn.Module ):
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 ,80 )
    def forward( self ,input_ids ,labels=None ):
if labels is not None:
return torch.tensor(0.0 ,device=input_ids.device ), input_ids
else:
return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus ):
@require_torch_neuroncore
    def test_trainer( self ):
        distributed_args = F'--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus ):
@require_torch_multi_gpu
    def test_trainer( self ):
        distributed_args = F'--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
    dataset = DummyDataset(dataset_length)
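    # For every dataset length (including ones not divisible by the world size),
    # distributed prediction must return all samples, in their original order.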
    def compute_metrics(p: EvalPrediction) -> Dict:
        """simple docstring"""
        sequential = list(range(len(dataset ) ) )
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
    trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = None
| 708 |
def solution(n: int = 10_00) -> int:
    """simple docstring"""
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
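# Project Euler problem 1: sum of all multiples of 3 or 5 below n; e.g. solution(10) == 23.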
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
from __future__ import annotations
import numpy as np
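# Doolittle's method: factor a square matrix A into A = L @ U, where L is unit lower
# triangular and U is upper triangular.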
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            F'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
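# Example (assuming a decomposable input): for table = np.array([[2., -2., 1.], [0., 1., 2.],
# [5., 3., 1.]]), lower_upper_decomposition(table) yields L, U with np.allclose(L @ U, table).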
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
from ..utils import DummyObject, requires_backends
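# Placeholder stubs for public Flax objects: each raises an informative error via
# requires_backends when instantiated without the flax backend installed.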
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
    _backends = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
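# Partition problem: split arr into two subsets minimizing the absolute difference of
# their sums, via a subset-sum reachability DP.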
def find_min(arr):
    """simple docstring"""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
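# Example: find_min([1, 6, 11, 5]) returns 1 — split into {1, 5, 6} (sum 12) and {11} (sum 11).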
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
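# A minimal pipe-delimited TableFormat so tabulate output renders as a Markdown-style
# table inside a Slack code block.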
failed = []
group_info = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
no_error_payload = UpperCamelCase__
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1
def __UpperCAmelCase ( moles: float , kelvin: float , volume: float ) -> float:
    """Return the pressure of an ideal gas: p = nRT / V."""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __UpperCAmelCase ( moles: float , kelvin: float , pressure: float ) -> float:
    """Return the volume of an ideal gas: V = nRT / p."""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
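# Worked example (illustrative values): 2 mol of an ideal gas at 300 K in a
# 0.02 m^3 vessel exerts p = nRT / V = 2 * 8.314462 * 300 / 0.02 ≈ 2.494e5 Pa;
# the second function inverts the same relation to recover the volume.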
if __name__ == "__main__":
from doctest import testmod
testmod()
| 711 |
def __UpperCAmelCase ( number: int ) -> int:
    """Return the 1-based position of the highest set bit of a non-negative integer (0 when the input is 0)."""
    if not isinstance(number , int ):
        raise TypeError('Input value must be an \'int\' type' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
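# Worked example: 8 == 0b1000 is shifted right four times before reaching 0,
# so the highest set bit sits at position 4; an input of 0 yields position 0.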
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption ( parser ):
    """Register the command-line options shared across the repository's test suites."""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    """Write the custom test reports when ``--make-reports`` is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
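# Usage sketch (matching transformers' shared testing utilities): invoking
# `pytest --make-reports=<run_id>` makes the summary hook above write a set of
# per-run report files (failures, durations, warnings, ...) for that id.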
| 712 |
import qiskit
def quantum_entanglement ( qubits: int = 2 ) -> qiskit.result.counts.Counts:
    """Prepare a GHZ (fully entangled) state on ``qubits`` qubits and return the simulated measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Measuring any one qubit now collapses the superposition of the others,
    # so every qubit is observed in the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    return job.result().get_counts(circuit )
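# For qubits=3 the circuit prepares the GHZ state (|000> + |111>) / sqrt(2), so
# the 1000 shots split roughly evenly between '000' and '111', e.g.
# {'000': 507, '111': 493} (illustrative counts; exact numbers vary per run).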
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''Validate Sri Lankan mobile phone numbers with a regular expression.'''
import re
def is_sri_lankan_phone_number ( phone: str ) -> bool:
    """Return True if ``phone`` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
    return bool(re.search(pattern , phone ) )
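# The pattern accepts an optional 0 / 94 / +94 / 0094 country prefix, a mobile
# prefix 7x (x in 0-2 or 4-8), an optional '-' or ' ' separator, then seven
# digits. Doctest-style sketch:
# >>> is_sri_lankan_phone_number('+94773283048')
# True
# >>> is_sri_lankan_phone_number('112345678')
# False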
if __name__ == "__main__":
phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 713 |
def check_bouncy ( number: int ) -> bool:
    """Return True if ``number`` is bouncy: its digits neither monotonically increase nor monotonically decrease."""
    if not isinstance(number , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(number )
    sorted_str_n = ''.join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
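# Illustrative cases: 134468 is non-decreasing and 66420 is non-increasing, so
# neither is bouncy, while 155349 both rises and falls and therefore is.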
def solution ( percent: float = 99 ) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches ``percent`` percent (Project Euler 112)."""
    if not 0 < percent < 1_00:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
SCREAMING_SNAKE_CASE_ : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' )
return image
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = val
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(lowerCamelCase_ , requires_grad=lowerCamelCase_ ), v_bias) )
SCREAMING_SNAKE_CASE_ : Optional[int] = qkv_bias
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3_64 if 'coco' in model_name else 2_24
SCREAMING_SNAKE_CASE_ : Tuple = BlipaVisionConfig(image_size=lowerCamelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
SCREAMING_SNAKE_CASE_ : Any = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "t5-xl" in model_name:
SCREAMING_SNAKE_CASE_ : Tuple = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = BlipaConfig(vision_config=lowerCamelCase_ , text_config=lowerCamelCase_ )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : int=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
SCREAMING_SNAKE_CASE_ : int = tokenizer('\n' , add_special_tokens=lowerCamelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE_ : str = get_blipa_config(lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlipaForConditionalGeneration(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
SCREAMING_SNAKE_CASE_ : List[str] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
SCREAMING_SNAKE_CASE_ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
SCREAMING_SNAKE_CASE_ : Tuple = load_model_and_preprocess(
name=lowerCamelCase_ , model_type=lowerCamelCase_ , is_eval=lowerCamelCase_ , device=lowerCamelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
SCREAMING_SNAKE_CASE_ : Tuple = original_model.state_dict()
SCREAMING_SNAKE_CASE_ : List[str] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.pop(lowerCamelCase_ )
if key.startswith('Qformer.bert' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('self' , 'attention' )
if "opt_proj" in key:
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
SCREAMING_SNAKE_CASE_ : List[str] = key.replace('t5' , 'language' )
SCREAMING_SNAKE_CASE_ : int = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = hf_model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
SCREAMING_SNAKE_CASE_ : int = load_demo_image()
SCREAMING_SNAKE_CASE_ : List[Any] = vis_processors['eval'](lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
# create processor
SCREAMING_SNAKE_CASE_ : str = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = BlipaProcessor(image_processor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(images=lowerCamelCase_ , return_tensors='pt' ).pixel_values.to(lowerCamelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
hf_model.to(lowerCamelCase_ )
with torch.no_grad():
if "opt" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
SCREAMING_SNAKE_CASE_ : Tuple = hf_model(lowerCamelCase_ , lowerCamelCase_ ).logits
else:
SCREAMING_SNAKE_CASE_ : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
SCREAMING_SNAKE_CASE_ : str = hf_model(lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(
[[-41.58_50, -4.4_4_4_0, -8.9_9_2_2], [-47.43_22, -5.9_1_4_3, -1.7_3_4_0]] , device=lowerCamelCase_ )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(
[[-57.01_09, -9.8_9_6_7, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=lowerCamelCase_ )
else:
# cast to same type
SCREAMING_SNAKE_CASE_ : Any = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase_ ) , lowerCamelCase_ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
SCREAMING_SNAKE_CASE_ : List[str] = ''
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowerCamelCase_ , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_model.generate({'image': original_pixel_values} )
SCREAMING_SNAKE_CASE_ : str = hf_model.generate(
lowerCamelCase_ , lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = input_ids.shape[1]
SCREAMING_SNAKE_CASE_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = [text.strip() for text in output_text]
print('HF generation:' , lowerCamelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
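# Example invocation (script and output paths are illustrative):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub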
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''', '''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : str = "levit"
def __init__( self ,snake_case__=224 ,snake_case__=3 ,snake_case__=3 ,snake_case__=2 ,snake_case__=1 ,snake_case__=16 ,snake_case__=[128, 256, 384] ,snake_case__=[4, 8, 12] ,snake_case__=[4, 4, 4] ,snake_case__=[16, 16, 16] ,snake_case__=0 ,snake_case__=[2, 2, 2] ,snake_case__=[2, 2, 2] ,snake_case__=0.02 ,**snake_case__ ,):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = kernel_size
SCREAMING_SNAKE_CASE_ : Optional[int] = stride
SCREAMING_SNAKE_CASE_ : Dict = padding
SCREAMING_SNAKE_CASE_ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = depths
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key_dim
SCREAMING_SNAKE_CASE_ : int = drop_path_rate
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Dict = attention_ratio
SCREAMING_SNAKE_CASE_ : int = mlp_ratio
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : int = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Any = version.parse("1.11" )
@property
def snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
return 1E-4
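# Usage sketch (the class names above are dataset placeholders for the LeViT
# configuration classes): instantiating the first class with its defaults
# describes a LeViT-128S-sized model, while the ONNX config declares a single
# `pixel_values` input and a validation tolerance of 1e-4 for export checks.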
| 715 |
def actual_power ( a: int , b: int ):
    """Recursively compute a ** b for an integer exponent by halving b at each step (exponentiation by squaring)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power ( a: int , b: int ) -> float:
    """Compute a ** b for any integer exponent, handling negative exponents by taking a reciprocal."""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
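# Note: for a negative exponent, `int(b / 2)` truncates toward zero, so the
# recursion in actual_power effectively computes a ** abs(b); dividing 1 by
# that value therefore yields the correct a ** b, e.g. power(-2, -3) == -0.125.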
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def snake_case ( self ,snake_case__=0 ):
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor((1, 3, 128, 128) ,rng=random.Random(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.RandomState(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
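    # Each test below swaps in a different scheduler, runs a few inference
    # steps on the tiny checkpoint, and compares a 3x3 corner of the output
    # image against reference values with a loose 1e-1 tolerance.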
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : str = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
# warmup pass to apply optimizations
SCREAMING_SNAKE_CASE_ : Tuple = pipe(**self.get_dummy_inputs() )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : str = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ : str = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
SCREAMING_SNAKE_CASE_ : Any = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
@property
def snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ : List[str] = False
return options
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=snake_case__ ,feature_extractor=snake_case__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=snake_case__ ,image=snake_case__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=snake_case__ ,output_type='np' ,)
SCREAMING_SNAKE_CASE_ : str = output.images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ : Optional[int] = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ : str = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
SCREAMING_SNAKE_CASE_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=snake_case__ ,safety_checker=snake_case__ ,feature_extractor=snake_case__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ : str = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
prompt=snake_case__ ,image=snake_case__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=snake_case__ ,output_type='np' ,)
SCREAMING_SNAKE_CASE_ : Tuple = output.images
SCREAMING_SNAKE_CASE_ : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
SCREAMING_SNAKE_CASE_ : str = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
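        # Usage note (sketch): rope_scaling={'type': 'linear', 'factor': 10.0}
        # stretches position indices uniformly, while 'dynamic' NTK scaling only
        # activates once inputs exceed the original max_position_embeddings,
        # which is why the short-input outputs match for 'dynamic' but not 'linear'.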
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)
def __UpperCAmelCase ( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
    """Return the built-in voltage (in volts) of a p-n junction for the given donor, acceptor and intrinsic carrier concentrations."""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
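# Worked example (illustrative silicon-like values, concentrations in cm^-3):
# donor 1e17, acceptor 1e17 and intrinsic 1.5e10 give
# V_bi = (kT / q) * ln(Nd * Na / ni^2) ≈ 0.0259 V * 31.4 ≈ 0.81 V at T = 300 K.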
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : Dict = collections.OrderedDict()
SCREAMING_SNAKE_CASE_ : List[Any] = collections.OrderedDict()
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Any = f.readlines()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = b
SCREAMING_SNAKE_CASE_ : Dict = idx
for wd in b:
SCREAMING_SNAKE_CASE_ : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
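# File-format note: each line of vocab.txt corresponds to one token id; a line
# may hold several comma-separated surface forms that all map to the same id
# (a bare ',' line is kept as a literal comma token), which is why three
# lookup tables are built above.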
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,emoji_file ,unk_token="<|endoftext|>" ,pad_token="<|endoftext|>" ,bos_token="<|startoftext|>" ,eos_token="<|endoftext|>" ,do_clean_text=False ,**kwargs ,):
        super().__init__(
            unk_token=unk_token ,pad_token=pad_token ,bos_token=bos_token ,eos_token=eos_token ,do_clean_text=do_clean_text ,**kwargs ,)
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file ,emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and is therefore larger than the raw vocabulary
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab ,**self.added_tokens_encoder )
    def _tokenize( self ,text ):
        return self.subword_tokenizer.tokenize(text ,clean=self.do_clean_text )
    def _convert_token_to_id( self ,token ):
        return self.vocab.get(token ,self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self ,index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self ,tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer ( object ):
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repattera = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatterb = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatterc = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatterd = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repattere = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatterf = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_transa = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repattera.sub('<URL>' ,content )
        content = self.content_repatterb.sub('<EMAIL>' ,content )
        content = self.content_repatterc.sub('<TEL>' ,content )
        content = self.content_repatterd.sub('<DATE>' ,content )
        content = self.content_repattere.sub('<DATE>' ,content )
        content = self.content_repatterf.sub('<PRICE>' ,content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace('\u3000' ,'<SP>' )
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
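        # Note (added): the loop below does a greedy longest-match scan: at
        # each position it tries windows from longest to shortest (up to 3
        # characters for plain text, or up to `maxlen` when a `<...>` special
        # tag may start here), keeps the in-vocab candidate with the smallest
        # token id, and otherwise falls back to <KIGOU>/<U2000U2BFF>/byte tokens.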
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates ,key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
| 685 | 0 |
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue :
    def __init__( self ):
        # One FIFO list per priority level (0 is served first).
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self ,priority ,data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
    def __str__( self ):
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    def __init__( self ):
        self.queue = []
    def enqueue( self ,data ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ):
        return str(self.queue )
def fixed_priority_queue ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
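    # Note (added): priority 0 drains first (10, 100, 128), then priority 1
    # (70, 7, 64), then priority 2 (1, 5, 4); order is FIFO within each level,
    # and the tenth dequeue below raises UnderFlowError once all nine items are gone.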
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
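# Hedged usage sketch (added; not part of the original module). `train_fn` is
# a hypothetical user training function; in upstream accelerate the first
# launcher above is exposed as `accelerate.notebook_launcher(function, args,
# num_processes)`.
if __name__ == "__main__":
    def train_fn():
        print('running a single training process' )

    __UpperCAmelCase(train_fn , () , 1 )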
| 685 | 0 |
from scipy.stats import pearsonr
import datasets
UpperCamelCase__ : Any = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
UpperCamelCase__ : int = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
UpperCamelCase__ : List[str] = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) ,reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] ,)
    def _compute( self ,predictions ,references ,return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references ,predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references ,predictions )[0] )}
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
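# Note (added): `_LazyModule` replaces this module in `sys.modules`, so the
# actual import of `TapexTokenizer` is deferred until first attribute access.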
| 685 | 0 |
'''simple docstring'''
class lowerCAmelCase_ :
    def __init__( self ,array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 ,len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self ,start ,end ):
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self ,target_sum ):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
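    # Hedged usage sketch (added; not part of the original file): a small
    # hypothetical array exercising both queries.
    ps = lowerCAmelCase_([1, 2, 3, 4] )
    print(ps.get_sum(1 ,3 ) )  # 2 + 3 + 4 = 9
    print(ps.contains_sum(6 ) )  # True: the subarray [1, 2, 3] sums to 6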
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name ,use_fast=True ,)
                encoding = tokenizer_r(text ,return_offsets_mapping=True ,add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
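# Note (added): with the toy merges defined in setUp ('l o', 'lo w</w>',
# 'e r</w>'), 'lower newer' tokenizes to ['lo', 'w', 'er</w>', 'n', 'e', 'w',
# 'er</w>'], which is exactly what test_full_tokenizer asserts above.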
| 685 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ : List[str] = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCamelCase__ : List[Any] = {
'''facebook/blenderbot_small-90M''': 5_12,
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Tuple = BlenderbotSmallTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,snake_case__=True ,**snake_case__ ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ ,merges=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ ,) ,bos_token=snake_case__ ,eos_token=snake_case__ ,unk_token=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
def snake_case ( self ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
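    # Note (added): `build_inputs_with_special_tokens` above frames a single
    # sequence as `<bos> ... <eos>`; a second sequence, when given, is appended
    # with a separating `<eos>` and its own trailing `<eos>`.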
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main ( ) -> None:
    """simple docstring"""
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 685 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class MockResponse :
    status_code : Any = 2_00
    headers : Tuple = {"Content-Length": "100"}
    cookies : List[Any] = {}
    def iter_content( self ,**kwargs ):
        return [bytes(CONTENT ,'utf-8' )]
def mock_request ( *args ,**kwargs ):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
SCREAMING_SNAKE_CASE_ : List[Any] = URL
if issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = url
elif issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [url]
elif issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[Any] = {'train': url}
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'dummy'
SCREAMING_SNAKE_CASE_ : Tuple = 'downloads'
SCREAMING_SNAKE_CASE_ : int = tmp_path
SCREAMING_SNAKE_CASE_ : str = DownloadConfig(
cache_dir=os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , use_etag=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Dict = DownloadManager(dataset_name=lowerCamelCase_ , download_config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = dl_manager.download(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[Any] = [downloaded_paths]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [urls]
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE_ : int = downloaded_paths.values()
SCREAMING_SNAKE_CASE_ : Dict = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCamelCase_ , lowerCamelCase_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE_ : Dict = Path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE_ : List[Any] = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE_ : List[Any] = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE_ : Any = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
if issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = filename
elif issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[str] = [filename]
elif issubclass(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = {'train': filename}
SCREAMING_SNAKE_CASE_ : Dict = 'dummy'
SCREAMING_SNAKE_CASE_ : Any = xz_file.parent
SCREAMING_SNAKE_CASE_ : int = 'extracted'
SCREAMING_SNAKE_CASE_ : Optional[int] = DownloadConfig(
cache_dir=lowerCamelCase_ , use_etag=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = DownloadManager(dataset_name=lowerCamelCase_ , download_config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dl_manager.extract(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[str] = [extracted_paths]
SCREAMING_SNAKE_CASE_ : Dict = [paths]
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE_ : Tuple = extracted_paths.values()
SCREAMING_SNAKE_CASE_ : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCamelCase_ , lowerCamelCase_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCamelCase_ , etag=lowerCamelCase_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE_ : str = extracted_path.read_text()
SCREAMING_SNAKE_CASE_ : List[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCamelCase_ , start=1 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = request.getfixturevalue(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase_ ) , start=1 ):
_test_jsonl(lowerCamelCase_ , lowerCamelCase_ )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = request.getfixturevalue(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase_ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCamelCase_ ) , start=1 ):
_test_jsonl(lowerCamelCase_ , lowerCamelCase_ )
assert num_tar == 1
assert num_jsonl == 2
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCamelCase_ ) , start=1 ):
assert os.path.basename(lowerCamelCase_ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(R'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(R'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
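# Note (added): the two regexes above collect only the module names that
# follow a leading dot; `set()` de-duplicates them, so the returned order is
# not guaranteed.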
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(R'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
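# Note (added): `find_pipeline_class` expects exactly one user-defined
# `DiffusionPipeline` subclass per community module; classes defined inside
# the `diffusers` package itself are skipped, and two candidates raise a
# ValueError.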
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
| 685 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Dict = "deformable_detr"
__a : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
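    # Note (added): `attribute_map` lets generic code read `config.hidden_size`
    # and `config.num_attention_heads` even though this config stores them as
    # `d_model` and `encoder_attention_heads`.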
def __init__( self ,snake_case__=True ,snake_case__=None ,snake_case__=3 ,snake_case__=300 ,snake_case__=1024 ,snake_case__=6 ,snake_case__=1024 ,snake_case__=8 ,snake_case__=6 ,snake_case__=1024 ,snake_case__=8 ,snake_case__=0.0 ,snake_case__=True ,snake_case__="relu" ,snake_case__=256 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=0.0 ,snake_case__=0.02 ,snake_case__=1.0 ,snake_case__=True ,snake_case__=False ,snake_case__="sine" ,snake_case__="resnet50" ,snake_case__=True ,snake_case__=False ,snake_case__=4 ,snake_case__=4 ,snake_case__=4 ,snake_case__=False ,snake_case__=300 ,snake_case__=False ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=1 ,snake_case__=1 ,snake_case__=5 ,snake_case__=2 ,snake_case__=0.1 ,snake_case__=0.25 ,snake_case__=False ,**snake_case__ ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
SCREAMING_SNAKE_CASE_ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = backbone_config.get('model_type' )
SCREAMING_SNAKE_CASE_ : Tuple = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ : List[Any] = config_class.from_dict(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = use_timm_backbone
SCREAMING_SNAKE_CASE_ : Any = backbone_config
SCREAMING_SNAKE_CASE_ : List[str] = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = num_queries
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = d_model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE_ : int = decoder_attention_heads
SCREAMING_SNAKE_CASE_ : Any = dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE_ : Tuple = init_std
SCREAMING_SNAKE_CASE_ : Optional[int] = init_xavier_std
SCREAMING_SNAKE_CASE_ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE_ : str = auxiliary_loss
SCREAMING_SNAKE_CASE_ : str = position_embedding_type
SCREAMING_SNAKE_CASE_ : Optional[int] = backbone
SCREAMING_SNAKE_CASE_ : List[str] = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ : Any = dilation
# deformable attributes
SCREAMING_SNAKE_CASE_ : Optional[int] = num_feature_levels
SCREAMING_SNAKE_CASE_ : Any = encoder_n_points
SCREAMING_SNAKE_CASE_ : Dict = decoder_n_points
SCREAMING_SNAKE_CASE_ : List[str] = two_stage
SCREAMING_SNAKE_CASE_ : Optional[Any] = two_stage_num_proposals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
SCREAMING_SNAKE_CASE_ : List[str] = class_cost
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox_cost
SCREAMING_SNAKE_CASE_ : Tuple = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[str] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ : Union[str, Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ : List[str] = eos_coefficient
SCREAMING_SNAKE_CASE_ : Any = focal_alpha
SCREAMING_SNAKE_CASE_ : Tuple = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ ,**snake_case__ )
@property
def snake_case ( self ):
return self.encoder_attention_heads
@property
def snake_case ( self ):
return self.d_model
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE_ : Tuple = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ : List[str] = self.__class__.model_type
return output
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
| 685 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Any = ["pixel_values"]
def __init__( self ,snake_case__ = True ,snake_case__ = None ,snake_case__ = PILImageResampling.BICUBIC ,snake_case__ = True ,snake_case__ = None ,snake_case__ = True ,snake_case__ = 1 / 255 ,snake_case__ = True ,snake_case__ = None ,snake_case__ = None ,snake_case__ = True ,**snake_case__ ,):
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__ ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(snake_case__ ,default_to_square=snake_case__ ,param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : List[Any] = do_resize
SCREAMING_SNAKE_CASE_ : Tuple = size
SCREAMING_SNAKE_CASE_ : Optional[Any] = resample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[int] = crop_size
SCREAMING_SNAKE_CASE_ : Dict = do_rescale
SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ : Any = do_convert_rgb
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = PILImageResampling.BICUBIC ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(snake_case__ ,default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(snake_case__ ,size=size['shortest_edge'] ,default_to_square=snake_case__ )
return resize(snake_case__ ,size=snake_case__ ,resample=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(snake_case__ ,size=(size['height'], size['width']) ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return rescale(snake_case__ ,scale=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,**snake_case__ ,):
return normalize(snake_case__ ,mean=snake_case__ ,std=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = None ,snake_case__ = ChannelDimension.FIRST ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Tuple = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(snake_case__ ,param_name='size' ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(snake_case__ ,param_name='crop_size' ,default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : List[str] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[str] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ : Tuple = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ : Optional[int] = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : int = [self.resize(image=snake_case__ ,size=snake_case__ ,resample=snake_case__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : List[str] = [self.center_crop(image=snake_case__ ,size=snake_case__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : List[Any] = [self.rescale(image=snake_case__ ,scale=snake_case__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : int = [self.normalize(image=snake_case__ ,mean=snake_case__ ,std=snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Any = [to_channel_dimension_format(snake_case__ ,snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': images}
return BatchFeature(data=snake_case__ ,tensor_type=snake_case__ )
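# Hedged standalone sketch (an assumption: a simplified mirror of the
# shortest-edge rule that get_resize_output_image_size applies above, before
# the 224x224 center crop): scale both sides so the shorter one lands exactly
# on the target size.
def _shortest_edge_size(height: int, width: int, shortest: int = 224) -> tuple:
    scale = shortest / min(height, width)
    return round(height * scale), round(width * scale)
print(_shortest_edge_size(480, 640))  # (224, 299), then center-cropped to 224x224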
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
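# Hedged runnable re-statement (an assumption: the estimator above with usable
# names): Monte Carlo integration approximates the integral of f over
# [min_value, max_value] as mean(f(U)) * (max_value - min_value) with
# U ~ Uniform(min_value, max_value).
def _mc_integral(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
# e.g. _mc_integral(100_000, lambda x: x * x, 0.0, 3.0) is ~9.0 (exact: 27/3 = 9)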
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
return x + 2
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'x = 3'
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : int = evaluate(snake_case__ ,{} ,state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ ,{'x': 3} )
SCREAMING_SNAKE_CASE_ : Dict = 'x = y'
SCREAMING_SNAKE_CASE_ : List[str] = {'y': 5}
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(snake_case__ ,{} ,state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 5, 'y': 5} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = 'y = add_two(x)'
SCREAMING_SNAKE_CASE_ : List[str] = {'x': 3}
SCREAMING_SNAKE_CASE_ : int = evaluate(snake_case__ ,{'add_two': add_two} ,state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
assert result is None
assert "tried to execute add_two" in out.out
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = 'x = 3'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ ,{'x': 3} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
SCREAMING_SNAKE_CASE_ : int = {'x': 3}
SCREAMING_SNAKE_CASE_ : Optional[int] = evaluate(snake_case__ ,{'add_two': add_two} ,state=snake_case__ )
self.assertDictEqual(snake_case__ ,{'x': 3, 'y': 5} )
self.assertDictEqual(snake_case__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = 'x = 3\ny = 5'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 3, 'y': 5} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 'text = f\'This is x: {x}.\''
SCREAMING_SNAKE_CASE_ : Any = {'x': 3}
SCREAMING_SNAKE_CASE_ : List[str] = evaluate(snake_case__ ,{} ,state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(snake_case__ ,{'x': 3, 'text': 'This is x: 3.'} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
SCREAMING_SNAKE_CASE_ : Dict = {'x': 3}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(snake_case__ ,{'x': 3, 'y': 2} )
SCREAMING_SNAKE_CASE_ : Dict = {'x': 8}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 8, 'y': 5} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'test_list = [x, add_two(x)]'
SCREAMING_SNAKE_CASE_ : Dict = {'x': 3}
SCREAMING_SNAKE_CASE_ : Dict = evaluate(snake_case__ ,{'add_two': add_two} ,state=snake_case__ )
self.assertListEqual(snake_case__ ,[3, 5] )
self.assertDictEqual(snake_case__ ,{'x': 3, 'test_list': [3, 5]} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 'y = x'
SCREAMING_SNAKE_CASE_ : Dict = {'x': 3}
SCREAMING_SNAKE_CASE_ : List[Any] = evaluate(snake_case__ ,{} ,state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ ,{'x': 3, 'y': 3} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'test_list = [x, add_two(x)]\ntest_list[1]'
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'x': 3}
SCREAMING_SNAKE_CASE_ : int = evaluate(snake_case__ ,{'add_two': add_two} ,state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 3, 'test_list': [3, 5]} )
SCREAMING_SNAKE_CASE_ : Tuple = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
SCREAMING_SNAKE_CASE_ : List[Any] = {'x': 3}
SCREAMING_SNAKE_CASE_ : Tuple = evaluate(snake_case__ ,{'add_two': add_two} ,state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'x = 0\nfor i in range(3):\n x = i'
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = evaluate(snake_case__ ,{'range': range} ,state=snake_case__ )
assert result == 2
self.assertDictEqual(snake_case__ ,{'x': 2, 'i': 2} )
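# Hedged analogue (an assumption -- plain exec, not the restricted interpreter
# under test): the state-threading behaviour the assertions above rely on.
_state = {'x': 3}
exec('y = _add_two(x)', {'_add_two': lambda n: n + 2}, _state)
print(_state)  # {'x': 3, 'y': 5}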
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
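# Hedged generic illustration (plain argparse, an assumption -- not the
# transformers command API): each register_subcommand call above boils down to
# attaching a subparser plus a `func` default, and main() dispatching on
# args.func like this.
import argparse
_parser = argparse.ArgumentParser('demo-cli', usage='demo-cli <command> [<args>]')
_subparsers = _parser.add_subparsers(help='demo-cli command helpers')
_hello = _subparsers.add_parser('hello')
_hello.set_defaults(func=lambda args: print('hello world'))
_args = _parser.parse_args(['hello'])
_args.func(_args)  # prints: hello world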
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
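# Hedged usage sketch (an assumption: the obfuscated names above restored,
# e.g. `HansProcessor` as registered in the task dict):
# processor = HansProcessor()
# examples = processor.get_dev_examples('/path/to/hans')  # heuristics_evaluation_set.txt
# features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)
# Each feature keeps `pairID` so predictions can be scored per HANS heuristic.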
| 685 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
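# Hedged standalone illustration (an assumption) of the guarded optional-import
# pattern used above: probe the optional backend once and record the result, so
# callers can fall back to dummy placeholders instead of failing at import time.
try:
    import torchsde  # optional extra; may be absent
    _HAS_TORCHSDE = True
except ImportError:
    _HAS_TORCHSDE = False
print('torchsde available:', _HAS_TORCHSDE)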
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
| 685 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] ,)
def snake_case ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__="uniform_average" ,snake_case__=True ):
SCREAMING_SNAKE_CASE_ : Any = mean_squared_error(
snake_case__ ,snake_case__ ,sample_weight=snake_case__ ,multioutput=snake_case__ ,squared=snake_case__ )
return {"mse": mse}
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00 # TEMPERATURE (unit = K), read as `T` in the return expression below
def __UpperCAmelCase ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
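# Hedged worked example (silicon-like numbers, an assumption -- not from the
# original file): Nd = Na = 1e17 cm^-3 and ni = 1e10 cm^-3 at T = 300 K give
# Vbi = (kT/q) * ln(Nd * Na / ni**2) ~ 0.0259 V * ln(1e14) ~ 0.83 V.
kT_over_q = Boltzmann * T / physical_constants['electron volt'][0]  # ~0.0259 V
print(kT_over_q * log((1e17 * 1e17) / 1e10**2))  # ~0.83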
| 685 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> List[Any]:
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE_ : int = ord(lowerCamelCase_ )
if not _is_chinese_char(lowerCamelCase_ ):
return 0
return 1
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = set()
for token in tokens:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(lowerCamelCase_ ) > 1 and is_chinese(lowerCamelCase_ )
if chinese_word:
word_set.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(lowerCamelCase_ )
return word_list
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : set() ) -> Optional[int]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
    SCREAMING_SNAKE_CASE_ : str = max([len(w ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE_ : Tuple = bert_tokens
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = 0, len(lowerCamelCase_ )
while start < end:
SCREAMING_SNAKE_CASE_ : List[str] = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE_ : Any = min(end - start , lowerCamelCase_ )
for i in range(lowerCamelCase_ , 1 , -1 ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE_ : Tuple = '##' + bert_word[j]
SCREAMING_SNAKE_CASE_ : Tuple = start + i
SCREAMING_SNAKE_CASE_ : Optional[int] = False
break
if single_word:
start += 1
return bert_word
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : LTP , lowerCamelCase_ : BertTokenizer ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(0 , len(lowerCamelCase_ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : Any = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
SCREAMING_SNAKE_CASE_ : Dict = [get_chinese_word(lowerCamelCase_ ) for r in res]
ltp_res.extend(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(0 , len(lowerCamelCase_ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : str = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = []
for input_ids, chinese_word in zip(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = []
for id in input_ids:
SCREAMING_SNAKE_CASE_ : str = bert_tokenizer._convert_id_to_token(lowerCamelCase_ )
input_tokens.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = add_sub_symbol(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase_ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase_ ) == 1 and _is_chinese_char(ord(lowerCamelCase_ ) ):
ref_id.append(lowerCamelCase_ )
ref_ids.append(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
return ref_ids
def __UpperCAmelCase ( lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
SCREAMING_SNAKE_CASE_ : str = [line.strip() for line in data if len(lowerCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE_ : List[Any] = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE_ : str = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE_ : Dict = prepare_ref(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = [json.dumps(lowerCamelCase_ ) + '\n' for ref in ref_ids]
f.writelines(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
UpperCamelCase__ : Optional[int] = parser.parse_args()
main(args)
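# Hedged sketch of the intended effect (an assumption: add_sub_symbol above
# restored to runnable form): subword pieces that continue a Chinese word from
# the LTP segmentation get a '##' prefix, so a whole-word-masking collator can
# mask the full word together, e.g.
#   tokens ['中', '国', '人'] with chinese_word_set {'中国'} -> ['中', '##国', '人']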
| 707 |
class OverFlowError( Exception ):
    pass
class UnderFlowError( Exception ):
    pass
class FixedPriorityQueue:
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue ( self ,priority ,snake_case__ ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__( self ):
        self.queue = []
    def enqueue ( self ,snake_case__ ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(snake_case__ )
    def dequeue ( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
with open(lowerCamelCase_ ) as metadata_file:
SCREAMING_SNAKE_CASE_ : str = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = LukeConfig(use_entity_aware_attention=lowerCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE_ : int = load_original_entity_vocab(lowerCamelCase_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE_ : str = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken('<ent>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = AddedToken('<ent2>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = 'MLukeTokenizer'
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE_ : str = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE_ : str = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE_ : str = state_dict[bias_name]
SCREAMING_SNAKE_CASE_ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : int = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE_ : Any = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ : Any = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE_ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE_ : Optional[Any] = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE_ : str = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : str = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE_ : Dict = LukeForMaskedLM(config=lowerCamelCase_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE_ : Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE_ : List[Any] = state_dict[key]
else:
SCREAMING_SNAKE_CASE_ : str = state_dict[key]
SCREAMING_SNAKE_CASE_ : int = model.load_state_dict(lowerCamelCase_ , strict=False )
if set(lowerCamelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE_ : Tuple = MLukeTokenizer.from_pretrained(lowerCamelCase_ , task='entity_classification' )
SCREAMING_SNAKE_CASE_ : Optional[int] = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE_ : Optional[Any] = (0, 9)
SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE_ : int = MLukeTokenizer.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE_ : Dict = (24, 30)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowerCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Dict = model(**lowerCamelCase_ )
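# locate the <mask> token position and decode the highest-scoring word predicted there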
SCREAMING_SNAKE_CASE_ : List[Any] = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE_ : Any = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE_ : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE_ : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ['[MASK]', '[PAD]', '[UNK]']
SCREAMING_SNAKE_CASE_ : Optional[int] = [json.loads(line ) for line in open(lowerCamelCase_ )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
for entry in data:
SCREAMING_SNAKE_CASE_ : int = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE_ : List[Any] = entity_id
break
SCREAMING_SNAKE_CASE_ : Optional[int] = F'{language}:{entity_name}'
SCREAMING_SNAKE_CASE_ : str = entity_id
return new_mapping
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = {}
def snake_case ( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = super().add_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
' `placeholder_token` that is not already in the tokenizer.' )
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=1 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = placeholder_token + F'_{i}'
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}; keep placeholder tokens independent' )
SCREAMING_SNAKE_CASE_ : str = output
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=1.0 ):
if isinstance(snake_case__ ,list ):
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE_ : List[str] = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE_ : Tuple = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE_ : Tuple = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace(snake_case__ ,' '.join(snake_case__ ) )
return text
def __call__( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : Any = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
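# each *.log file is expected to contain one JSON record per line (pytest --report-log output)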
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE_ : str = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
SCREAMING_SNAKE_CASE_ : int = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(lowerCamelCase_ , 'README.md' )
print(F'Generating {path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
# make sure we are under the root of the project
UpperCamelCase__ : List[str] = Path(__file__).resolve().parent.parent.parent
UpperCamelCase__ : Union[str, Any] = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase__ : Optional[Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , int ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = -1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
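# c = n - a - b, so a**2 + b**2 = (n - a - b)**2, which simplifies to
# b = (n**2 - 2*a*n) / (2*n - 2*a)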
SCREAMING_SNAKE_CASE_ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE_ : List[str] = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE_ : Optional[int] = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE_ : List[str] = candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
UpperCamelCase__ : List[Any] = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(lowerCamelCase_ , map_location='cpu' )
return sd
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=rename_keys_prefix ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = OrderedDict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE_ : List[str] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE_ : Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE_ : List[Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it is added separately here
SCREAMING_SNAKE_CASE_ : Optional[int] = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Any = 'pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Dict = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {'visual_embedding_dim': 5_12}
SCREAMING_SNAKE_CASE_ : Tuple = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {'visual_embedding_dim': 20_48}
SCREAMING_SNAKE_CASE_ : Any = 'vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : int = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
SCREAMING_SNAKE_CASE_ : Tuple = 'vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_ : List[Any] = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE_ : Tuple = 'nlvr'
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE_ : Tuple = load_state_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE_ : str = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE_ : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase__ : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , int ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.load(lowerCamelCase_ , map_location='cpu' )
SCREAMING_SNAKE_CASE_ : List[str] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
SCREAMING_SNAKE_CASE_ : List[str] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
SCREAMING_SNAKE_CASE_ : List[str] = v
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = v
SCREAMING_SNAKE_CASE_ : int = chkpt['params']
SCREAMING_SNAKE_CASE_ : str = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
SCREAMING_SNAKE_CASE_ : Optional[Any] = chkpt['dico_word2id']
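# vocab post-processing: words (no '@@') beyond the first 14 special tokens get the '</w>' end-of-word marker; '@@' continuation markers are stripped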
SCREAMING_SNAKE_CASE_ : Optional[int] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
SCREAMING_SNAKE_CASE_ : Optional[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ : Any = pytorch_dump_folder_path + '/' + CONFIG_NAME
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase_ , indent=2 ) + '\n' )
print(F'Save vocab file to {pytorch_vocab_dump_path}' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase_ , indent=2 ) + '\n' )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 715 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCamelCase__ : List[Any] = float('''nan''')
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = sys.stdout
SCREAMING_SNAKE_CASE_ : Optional[Any] = open(snake_case__ ,'a' )
def __getattr__( self ,snake_case__ ):
return getattr(self.stdout ,snake_case__ )
def snake_case ( self ,snake_case__ ):
self.stdout.write(snake_case__ )
# strip tqdm codes
self.file.write(re.sub(R'^.*\r' ,'' ,snake_case__ ,0 ,re.M ) )
def __UpperCAmelCase ( lowerCamelCase_ : str=80 , lowerCamelCase_ : List[str]=False ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
# deal with critical env vars
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['CUDA_VISIBLE_DEVICES']
for key in env_keys:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.environ.get(key , None )
if val is not None:
cmd.append(F'{key}={val}' )
# python executable (not always needed if the script is executable)
SCREAMING_SNAKE_CASE_ : int = sys.executable if full_python_path else sys.executable.split('/' )[-1]
cmd.append(lowerCamelCase_ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Tuple = ''
while len(cmd ) > 0:
current_line += F'{cmd.pop(0 )} '
if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(current_line )
SCREAMING_SNAKE_CASE_ : int = ''
return "\\\n".join(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = re.sub(R'[\\\n]+' , ' ' , args.base_cmd )
# remove --output_dir if any and set our own
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R'--output_dir\s+[^\s]+' , '' , args.base_cmd )
args.base_cmd += F' --output_dir {output_dir}'
# ensure we have --overwrite_output_dir
SCREAMING_SNAKE_CASE_ : str = re.sub(R'--overwrite_output_dir\s+' , '' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
SCREAMING_SNAKE_CASE_ : Tuple = subprocess.run(lowerCamelCase_ , capture_output=lowerCamelCase_ , text=lowerCamelCase_ )
if verbose:
print('STDOUT' , result.stdout )
print('STDERR' , result.stderr )
# save the streams
SCREAMING_SNAKE_CASE_ : Dict = variation.replace(' ' , '-' )
with open(Path(lowerCamelCase_ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f:
f.write(result.stdout )
with open(Path(lowerCamelCase_ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('failed' )
return {target_metric_key: nan}
with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.load(lowerCamelCase_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    """simple docstring"""
    results = []
    metrics = []
    preamble = F'{id}: {variation:<{longest_variation_len}}'
    outcome = F'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'\33[2K\r{outcome}'
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F'{outcome} {mean_target}'
        if len(results ) > 1:
            results_str += F' {tuple(round(x , 2 ) for x in results )}'
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
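# Averaging sketch for the step above: per-repeat metric dicts are reduced
# key by key with the arithmetic mean (numbers illustrative):
#
#   from statistics import fmean
#   metrics = [{'train_samples_per_second': 40.1}, {'train_samples_per_second': 42.3}]
#   {k: round(fmean([m[k] for m in metrics] ) , 2 ) for k in metrics[0]}
#   # -> {'train_samples_per_second': 41.2}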
def get_versions() -> Union[str, Any]:
    """simple docstring"""
    properties = torch.cuda.get_device_properties(torch.device('cuda' ) )
    return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    """simple docstring"""
    df = pd.DataFrame(results )
    variation_key = 'variation'
    diff_key = 'diff_%'
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='columns' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='columns' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='columns' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('_' , '<br>' ) , axis='columns' )
    df_console = df.rename(lambda c : c.replace('_' , '\n' ) , axis='columns' )
    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='.2f' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='.2f' )]
    print('\n\n'.join(report ) )
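# diff_% sketch matching the apply() above: each row's target metric is
# compared against the sentinel/baseline value (numbers illustrative):
#
#   import pandas as pd
#   df = pd.DataFrame({'variation': ['', '--fp16'], 'train_samples_per_second': [40.0, 58.0]} )
#   sentinel_value = 40.0
#   df['diff_%'] = df['train_samples_per_second'].apply(lambda v: round(1_00 * (v - sentinel_value) / sentinel_value ) )
#   # -> 0 for the baseline row, 45 for --fp16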
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd' , default=None , type=str , required=True , help='Base cmd' , )
    parser.add_argument(
        '--variations' , default=None , type=str , nargs='+' , required=True , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
    parser.add_argument(
        '--base-variation' , default=None , type=str , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
    parser.add_argument(
        '--target-metric-key' , default=None , type=str , required=True , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
    parser.add_argument(
        '--report-metric-keys' , default='' , type=str , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'' , )
    parser.add_argument(
        '--repeat-times' , default=1 , type=int , help='How many times to re-run each variation - an average will be reported' , )
    parser.add_argument(
        '--output_dir' , default='output_benchmark' , type=str , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
    parser.add_argument(
        '--verbose' , default=False , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'\|' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(' '.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(F'and this script\'s output is also piped into {report_fn}' )
    sys.stdout = Tee(report_fn )
    print(F'\n*** Running {len(variations )} benchmarks:' )
    print(F'Base command: {" ".join(base_cmd )}' )
    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations , desc='Total completion: ' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
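# Example invocation (hypothetical script path and flags):
#
#   python trainer-benchmark.py \
#       --base-cmd 'python examples/pytorch/language-modeling/run_clm.py --model_name_or_path gpt2' \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2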
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base( self ):
pass
    @parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
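    # For reference, the two RoPE scaling configs exercised above (a sketch;
    # the factor value is illustrative): "linear" rescales positions at every
    # length, while "dynamic" only activates past the original
    # max_position_embeddings:
    #
    #   config.rope_scaling = {'type': 'linear', 'factor': 10.0}
    #   config.rope_scaling = {'type': 'dynamic', 'factor': 10.0}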
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
    def test_model_7b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
    def test_model_13b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
    def test_model_13bf_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
    def test_model_70b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is curently gated' )
@slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt , return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 685 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int ) -> datetime:
    """simple docstring"""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_00 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
UpperCamelCase__ : List[Any] = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
UpperCamelCase__ : int = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
UpperCamelCase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    """simple docstring"""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
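# Sketch of the vocab.txt format parsed above (tokens made up): one row per id,
# with comma-separated spelling variants sharing that id.
#
#   <unk>
#   こんにちは
#   ハロー,はろー
#
# Here row 2 maps both 'ハロー' and 'はろー' to id 2, while ids_to_tokens[2]
# keeps the full variant list and raw_vocab keys the joined string 'ハロー,はろー'.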
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__="<|endoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__="<|startoftext|>" ,snake_case__="<|endoftext|>" ,snake_case__=False ,**snake_case__ ,):
super().__init__(
unk_token=snake_case__ ,pad_token=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,do_clean_text=snake_case__ ,**snake_case__ ,)
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case__ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE_ : str = do_clean_text
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = load_vocab_and_emoji(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer:
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(' ' , '<SP>' )
        text = text.replace('\u3000' , '<SP>' )  # full-width space
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
def snake_case ( self ,snake_case__ ,snake_case__="\n" ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Dict = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case__ )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case__ )
if len(snake_case__ ) > 0:
words.append(bytearray(snake_case__ ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE_ : int = ''.join(snake_case__ )
return text
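# Byte-fallback sketch: `tokenize` above emits one <|byte..|> token per UTF-8
# byte for out-of-vocab characters, and `convert_id_to_token` reassembles them:
#
#   list('雹'.encode('utf-8' ) )      # [233, 155, 185]
#   # tokenized as ['<|byte233|>', '<|byte155|>', '<|byte185|>'] and decoded
#   # back with bytearray([233, 155, 185] ).decode('utf-8' , errors='replace' )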
| 685 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ : List[Any] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
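# With the lazy structure above, importing this package stays cheap: the
# torch- or flax-backed classes are only materialized on attribute access,
# e.g. (sketch):
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel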
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
"""simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
            function(*args )
def debug_launcher(function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
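# Typical notebook usage (a sketch; the training function and its args are
# hypothetical — in `accelerate` these helpers ship as `notebook_launcher`
# and `debug_launcher`):
#
#   def training_loop(mixed_precision='fp16' ):
#       ...  # build Accelerator() and train here
#
#   notebook_launcher(training_loop , args=('fp16',) , num_processes=2 )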
| 685 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length ):
    """simple docstring"""
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
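# Behavior sketch for the padder above (hypothetical values):
#
#   padding_tensor([[1, 2], [3]] , -1 , 'right' , 4 )
#   # -> [[1, 2, -1, -1], [3, -1, -1, -1]]
#   padding_tensor([[1, 2], [3]] , -1 , 'left' , 4 )
#   # -> [[-1, -1, 1, 2], [-1, -1, -1, 3]]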
def is_punctuation(char ):
    """simple docstring"""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
        return True
    cat = unicodedata.category(char )
    if cat.startswith('P' ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -1_00
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids'] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
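# Label padding sketch: with padding_side == 'right', each label sequence is
# extended to the entity sequence length with label_pad_token_id (-100), which
# PyTorch cross-entropy losses ignore, e.g. [0, 2, 1] -> [0, 2, 1, -100, -100].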
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCamelCase__ : Tuple = '''facebook/wmt19-en-de'''
UpperCamelCase__ : Any = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
UpperCamelCase__ : int = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
UpperCamelCase__ : List[str] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
UpperCamelCase__ : Optional[int] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
UpperCamelCase__ : Any = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
UpperCamelCase__ : Dict = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
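# Consuming the artifact in tests (sketch, using the repo id from the comments above):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tokenizer = FSMTTokenizer.from_pretrained('stas/tiny-wmt19-en-de')
#   model = FSMTForConditionalGeneration.from_pretrained('stas/tiny-wmt19-en-de')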
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
    def test_offsets_mapping_with_different_add_prefix_space_argument( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
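if __name__ == '__main__':
    # Standalone sketch, not part of the test suite above: what `return_offsets_mapping`
    # yields for a fast tokenizer. Each entry is a (start, end) character span into the
    # raw input string, which is exactly what the offset-mapping assertions above check.
    from transformers import CLIPTokenizerFast

    tok = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32')
    enc = tok('hello hello', return_offsets_mapping=True, add_special_tokens=False)
    print(enc.offset_mapping)  # expected to be [(0, 5), (6, 11)]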
| 685 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = 'student_t',
        loss: str = 'nll',
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = 'mean',
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = 'gelu',
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = 'prob',
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`'
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`'
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
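# Minimal usage sketch (from user code, since this module lives inside the
# transformers package and uses relative imports):
#
#     from transformers import InformerConfig
#     config = InformerConfig(prediction_length=24)
#     assert config.hidden_size == config.d_model  # aliased via `attribute_map` above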
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` command-line tool."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
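# Example invocations (for illustration), once `transformers` is installed and
# the `transformers-cli` entry point is on PATH:
#
#     transformers-cli env                       # print environment info for bug reports
#     transformers-cli download bert-base-cased  # pre-download a checkpoint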
| 685 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that returns the vertices reachable from `vert` in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph; collects one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS over the reversed graph
    in reverse post-order; each second-pass tree is one strongly connected component."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
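if __name__ == '__main__':
    # Demonstration on the two fixture graphs above: the first splits into the
    # components {0, 1, 2}, {3} and {4}; the second into {0, 1, 2} and {3, 4, 5}.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))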
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all diffusers versions published on PyPI, sorted from oldest to newest."""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the cache directory for dynamic modules and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a dynamic module (and, recursively, its parents) inside the cache directory."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the names of all modules the given file imports with a relative import."""
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Return all files that must travel with `module_file` because of (transitive) relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'{f}.py' for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that all top-level imports of a file are importable and return its relative imports."""
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            F'{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`'
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the dynamic module cache and return the requested class."""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Retrieve the unique pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    F' {loaded_module}.'
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Fetch `module_file` from a local folder, the diffusers GitHub community pipelines or the Hub,
    copy it (and its relative imports) into the dynamic module cache and return its path."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])

        # retrieve the GitHub version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(F'Defaulting to latest_version: {revision}.')
        elif revision in available_versions:
            revision = F'v{revision}'
        elif revision == 'main':
            revision = revision
        else:
            raise ValueError(
                F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
                F' {", ".join(available_versions + ["main"])}.'
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == 'local' or submodule == 'git':
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = F'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    F'{module_needed}.py',
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in the local folder or repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
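# Minimal usage sketch (from user code, not this module, since the relative
# imports above require the installed diffusers package; needs network access):
#
#     from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module
#     pipeline_class = get_class_from_dynamic_module(
#         'clip_guided_stable_diffusion', module_file='clip_guided_stable_diffusion.py'
#     )
#     # With class_name=None, find_pipeline_class() resolves the single
#     # DiffusionPipeline subclass defined in that community pipeline file.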
| 685 | 0 |
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor entirely non-increasing.

    >>> check_bouncy(6789)
    False
    >>> check_bouncy(6974)
    True
    """
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches `percent`.

    >>> solution(50)
    538
    >>> solution(90)
    21780
    """
    if not 0 < percent < 1_00:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
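# Worked example, for clarity: 134468 never decreases digit to digit and 66420
# never increases, so neither is bouncy; 155349 first rises then falls, so it
# is bouncy. The first bouncy number is 101.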
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = 'visual_bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
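# Minimal usage sketch (from user code, since this module uses relative imports):
#
#     from transformers import VisualBertConfig
#     config = VisualBertConfig(visual_embedding_dim=1024)
#     # all other attributes keep the defaults above, e.g. config.hidden_size == 768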
| 685 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}')
    print(F'The numpy value of pi is {pi}')
    print(F'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo integration: the mean of f at uniform samples, scaled by the interval width."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator on y = x, whose exact integral over [a, b] is (b^2 - a^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {expected_value}')
    print(F'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(F'Estimated value is {estimated_value}')
    print(F'Expected value is {pi}')
    print(F'Total error is {abs(estimated_value - pi)}')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
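if __name__ == "__main__":
    # Demonstration run: all three estimators converge as the sample count
    # grows; 100_000 samples is usually enough for roughly two correct decimals.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)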
| 702 |
| 685 | 0 |
'''Auto Model classes for the Flax backend.'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
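# Minimal usage sketch (from user code, with jax/flax installed; the checkpoint
# name is only an example of one with Flax weights on the Hub):
#
#     from transformers import FlaxAutoModel
#     model = FlaxAutoModel.from_pretrained('bert-base-cased')
#     # the mappings above resolve the BertConfig checkpoint to FlaxBertModel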
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {'do_resize': self.do_resize, 'size': self.size, 'apply_ocr': self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')

        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
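if __name__ == '__main__':
    # Standalone sketch, not part of the test suite above: run the processor on
    # a synthetic image. With apply_ocr=True, Tesseract would also have to be
    # installed; apply_ocr=False skips OCR and only resizes/normalizes.
    from PIL import Image as PILImage

    dummy = PILImage.new('RGB', (640, 480), color='white')
    processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    enc = processor(dummy, return_tensors='pt')
    print(enc.pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])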
| 685 | 0 |
from manim import *
class Stage1(Scene):
    def construct(self):
        # Building blocks: a memory cell outline and a slightly smaller fill rectangle.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.',
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        # Shrink a yellow copy of every model cell and move it onto the CPU column,
        # visualising the meta-device weights landing in (cheap) CPU memory.
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
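# To render the scene above with the manim CLI (assuming this file is saved as
# stage_1.py; -pql previews at low quality):
#
#     manim -pql stage_1.py Stage1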
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
    def __getitem__( self ,snake_case__ ):
        return self.features[snake_case__]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
    def __getitem__( self ,snake_case__ ):
        return self.features[snake_case__]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
    def _create_examples( self ,lines ,set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid ,text_a=text_a ,text_b=text_b ,label=label ,pairID=pairID ) )
        return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
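# The heavy lifting in the feature conversion above is a single tokenizer
# call; a standalone sketch (the checkpoint name is only an example):
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# enc = tokenizer(
#     "The doctor paid the actor.",   # premise
#     "The actor paid the doctor.",   # hypothesis
#     max_length=128,
#     padding="max_length",
#     truncation=True,
# )
# print(len(enc["input_ids"]))  # 128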
| 685 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width( height: int , width: int , scale_factor: int = 8 ) -> tuple:
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
return new_height * scale_factor, new_width * scale_factor
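# Sanity check of the rounding above (scale_factor defaults to 8):
#   downscale_height_and_width(512, 512) -> (64, 64)   # 512 % 64 == 0
#   downscale_height_and_width(300, 300) -> (40, 40)   # 300 // 64 == 4, rounded up to 5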
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,):
super().__init__()
self.register_modules(
unet=snake_case__ ,scheduler=snake_case__ ,movq=snake_case__ ,)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
if latents is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = randn_tensor(snake_case__ ,generator=snake_case__ ,device=snake_case__ ,dtype=snake_case__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
SCREAMING_SNAKE_CASE_ : Any = latents.to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def snake_case ( self ,snake_case__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ : Tuple = torch.device(F'cuda:{gpu_id}' )
SCREAMING_SNAKE_CASE_ : List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ ,snake_case__ )
def snake_case ( self ,snake_case__=0 ):
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE_ : Any = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ : List[str] = cpu_offload_with_hook(snake_case__ ,snake_case__ ,prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self ):
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ = 512 ,snake_case__ = 512 ,snake_case__ = 100 ,snake_case__ = 4.0 ,snake_case__ = 1 ,snake_case__ = None ,snake_case__ = None ,snake_case__ = "pil" ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : str = self._execution_device
SCREAMING_SNAKE_CASE_ : Dict = guidance_scale > 1.0
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat(snake_case__ ,dim=0 )
SCREAMING_SNAKE_CASE_ : Tuple = image_embeds.shape[0] * num_images_per_prompt
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = torch.cat(snake_case__ ,dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ : Tuple = image_embeds.repeat_interleave(snake_case__ ,dim=0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = negative_image_embeds.repeat_interleave(snake_case__ ,dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=snake_case__ )
self.scheduler.set_timesteps(snake_case__ ,device=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ : int = self.unet.config.in_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = downscale_height_and_width(snake_case__ ,snake_case__ ,self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE_ : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,snake_case__ ,snake_case__ ,snake_case__ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ : int = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE_ : int = self.unet(
sample=snake_case__ ,timestep=snake_case__ ,encoder_hidden_states=snake_case__ ,added_cond_kwargs=snake_case__ ,return_dict=snake_case__ ,)[0]
if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] ,dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred, _ = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred ,t ,latents ,generator=generator ,)[0]
# post-processing
        image = self.movq.decode(latents ,force_not_quantize=True )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 ,1 )
            image = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
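# The classifier-free-guidance arithmetic from the denoising loop above, in
# isolation (toy tensors; shapes are illustrative only):
#
# import torch
#
# guidance_scale = 4.0
# noise_pred = torch.randn(2, 4, 96, 96)  # [uncond, cond] stacked on dim 0
# noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
# guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# print(guided.shape)  # torch.Size([1, 4, 96, 96])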
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset( dataset_size , input_in_memory_max_size , monkeypatch ):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
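# For reference, is_small_dataset() just compares a size against
# datasets.config.IN_MEMORY_MAX_SIZE; a standalone illustration of the
# behavior the test asserts (requires the `datasets` package):
#
# import datasets.config
# from datasets.utils.info_utils import is_small_dataset
#
# datasets.config.IN_MEMORY_MAX_SIZE = 600 * 2**20  # pretend max is 600 MiB
# print(is_small_dataset(400 * 2**20))              # True: 400 MiB < 600 MiB
# datasets.config.IN_MEMORY_MAX_SIZE = 0            # default: keep-in-memory disabled
# print(is_small_dataset(400 * 2**20))              # False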
| 685 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[Any] = (PNDMScheduler,)
__a : Union[str, Any] = (("num_inference_steps", 50),)
    def get_scheduler_config( self ,**kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs )
        return config
def snake_case ( self ,snake_case__=0 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('num_inference_steps' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
SCREAMING_SNAKE_CASE_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_scheduler_config(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : str = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = new_scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case ( self ):
pass
def snake_case ( self ,snake_case__=0 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('num_inference_steps' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Any = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : List[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = new_scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_ : int = scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Any = new_scheduler.step_plms(snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self ,**config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step_prk(residual ,t ,sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step_plms(residual ,t ,sample ).prev_sample
        return sample
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('num_inference_steps' ,snake_case__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ ,'set_timesteps' ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ ,'set_timesteps' ):
SCREAMING_SNAKE_CASE_ : Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE_ : List[str] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_ : Dict = scheduler.step_prk(snake_case__ ,0 ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step_prk(snake_case__ ,1 ,snake_case__ ,**snake_case__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step_plms(snake_case__ ,0 ,snake_case__ ,**snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step_plms(snake_case__ ,1 ,snake_case__ ,**snake_case__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def snake_case ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**snake_case__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def snake_case ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ ,beta_end=snake_case__ )
def snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def snake_case ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=snake_case__ )
def snake_case ( self ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case__ )
def snake_case ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
SCREAMING_SNAKE_CASE_ : int = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler.step_prk(snake_case__ ,snake_case__ ,snake_case__ ).prev_sample
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Tuple = scheduler_class(**snake_case__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def snake_case ( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def snake_case ( self ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
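# A minimal end-to-end version of the loop that full_loop() exercises, with
# random tensors standing in for a real UNet's output:
#
# import torch
# from diffusers import PNDMScheduler
#
# scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.prk_timesteps:                # Runge-Kutta warmup steps
#     residual = torch.randn_like(sample)          # would come from the model
#     sample = scheduler.step_prk(residual, t, sample).prev_sample
# for t in scheduler.plms_timesteps:               # linear multistep phase
#     residual = torch.randn_like(sample)
#     sample = scheduler.step_plms(residual, t, sample).prev_sample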
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
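# Worked example for silicon at T = 300 K (n_i ~ 1e10 cm^-3 is a textbook
# assumption; the concentration units cancel inside the logarithm):
#   V_bi = (k*T/q) * ln(N_d * N_a / n_i**2)
#        ~ 0.0259 V * ln(1e17 * 1e17 / (1e10)**2) ~ 0.83 V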
| 685 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase_ :
__a : Dict = BlenderbotSmallConfig
__a : List[Any] = {}
__a : Any = "gelu"
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=False ,snake_case__=99 ,snake_case__=32 ,snake_case__=2 ,snake_case__=4 ,snake_case__=37 ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=20 ,snake_case__=2 ,snake_case__=1 ,snake_case__=0 ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pad_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bos_token_id
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
SCREAMING_SNAKE_CASE_ : List[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
SCREAMING_SNAKE_CASE_ : List[str] = prepare_blenderbot_small_inputs_dict(snake_case__ ,snake_case__ ,snake_case__ )
return config, inputs_dict
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids[:1, :]
SCREAMING_SNAKE_CASE_ : int = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE_ : Dict = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE_ : Dict = 1
# first forward pass
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,attention_mask=snake_case__ ,head_mask=snake_case__ ,use_cache=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Tuple = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : List[str] = tf.concat([input_ids, next_tokens] ,axis=-1 )
SCREAMING_SNAKE_CASE_ : str = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE_ : List[Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ ,snake_case__ ,rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Union[str, Any]:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__a : str = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__a : Optional[Any] = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__a : List[str] = True
__a : Tuple = False
__a : List[Any] = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self ,config_class=snake_case__ )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
__a : str = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
__a : Optional[Any] = "facebook/blenderbot_small-90M"
@cached_property
    def tokenizer( self ):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
@slow
def snake_case ( self ):
        model_inputs = self.tokenizer(self.src_text ,return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=True ,)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
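# A standalone sketch of the integration test above (downloads the 90M
# checkpoint; `TFAutoModelForSeq2SeqLM` is the real name of the auto class
# the test refers to, and outputs may drift across library versions):
#
# from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM
#
# tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
# inputs = tok(["i'm nervous about my exam tomorrow."], return_tensors="tf")
# ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
# print(tok.batch_decode(ids.numpy(), skip_special_tokens=True)[0])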
| 707 |
class OverFlowError(Exception ):
    pass
class UnderFlowError(Exception ):
    pass
class FixedPriorityQueue:
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self ,priority ,data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
    def __str__( self ):
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__( self ):
        self.queue = []
    def enqueue( self ,data ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ):
        return str(self.queue )
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 1_00 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 1_28 )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(1_00 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(1_28 )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
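# For comparison: the standard library's heapq gives the same smallest-first
# behavior as ElementPriorityQueue in O(log n) per operation instead of the
# O(n) min()/remove() scan above:
#
# import heapq
#
# heap = []
# for value in (10, 70, 100, 1, 5):
#     heapq.heappush(heap, value)
# print(heapq.heappop(heap))  # 1
# print(heapq.heappop(heap))  # 5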
| 685 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,snake_case__ ,)
super().__init__(*snake_case__ ,**snake_case__ )
| 708 |
def solution( n: int = 10_00 ) -> int:
    """simple docstring"""
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
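# Cross-check via inclusion-exclusion and the arithmetic-series sum:
#
# def sum_multiples_below(k: int, n: int) -> int:
#     m = (n - 1) // k
#     return k * m * (m + 1) // 2
#
# # multiples of 3 + multiples of 5 - multiples of 15 (counted twice)
# print(sum_multiples_below(3, 1000) + sum_multiples_below(5, 1000) - sum_multiples_below(15, 1000))  # 233168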
| 685 | 0 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__( self ,keywords ):
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self ,current_state ,char ):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self ,keyword ):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state ,character )
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state ,self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state ,self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in( self ,string ):
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state ,string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state ,string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
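# Usage sketch for the automaton above (relies on the names restored in the
# class); occurrences are reported as start indices of each matched keyword:
#
# automaton = Automaton(["he", "she", "hers"])
# print(automaton.search_in("ushers"))  # {'she': [1], 'he': [2], 'hers': [2]}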
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
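# The classes above all share one purpose: any attribute access on them
# without the flax backend installed raises a helpful error via the
# DummyObject metaclass. A self-contained imitation of that mechanism
# (class names here are hypothetical):
#
# class DummyMeta(type):
#     def __getattr__(cls, name):
#         raise ImportError(f"{cls.__name__} requires the `flax` backend")
#
# class FlaxPipelinePlaceholder(metaclass=DummyMeta):
#     pass
#
# try:
#     FlaxPipelinePlaceholder.from_pretrained
# except ImportError as err:
#     print(err)  # FlaxPipelinePlaceholder requires the `flax` backend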
| 685 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : str = '''▁'''
UpperCamelCase__ : str = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ : Optional[int] = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
UpperCamelCase__ : Any = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
UpperCamelCase__ : List[Any] = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : str = VOCAB_FILES_NAMES
__a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__a : int = ["input_ids", "attention_mask"]
__a : List[int] = []
__a : List[int] = []
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<pad>" ,snake_case__="<unk>" ,snake_case__="m2m100" ,snake_case__ = None ,snake_case__=8 ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : int = language_codes
SCREAMING_SNAKE_CASE_ : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
SCREAMING_SNAKE_CASE_ : List[str] = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case__ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case__ ,tgt_lang=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,sep_token=snake_case__ ,unk_token=snake_case__ ,pad_token=snake_case__ ,language_codes=snake_case__ ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Dict = vocab_file
SCREAMING_SNAKE_CASE_ : int = load_json(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : int = spm_file
SCREAMING_SNAKE_CASE_ : List[str] = load_spm(snake_case__ ,self.sp_model_kwargs )
SCREAMING_SNAKE_CASE_ : List[str] = len(self.encoder )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
self.get_lang_token(snake_case__ ): self.encoder_size + i for i, lang_code in enumerate(snake_case__ )
}
SCREAMING_SNAKE_CASE_ : Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case__ )}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
SCREAMING_SNAKE_CASE_ : int = src_lang if src_lang is not None else 'en'
SCREAMING_SNAKE_CASE_ : Optional[int] = tgt_lang
SCREAMING_SNAKE_CASE_ : Dict = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
SCREAMING_SNAKE_CASE_ : Dict = num_madeup_words
@property
def snake_case ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def snake_case ( self ):
return self._src_lang
@src_lang.setter
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case ( self ,snake_case__ ):
return self.sp_model.encode(snake_case__ ,out_type=snake_case__ )
def snake_case ( self ,snake_case__ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case__ ,self.encoder[self.unk_token] )
def snake_case ( self ,snake_case__ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case__ ,self.unk_token )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Tuple = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
SCREAMING_SNAKE_CASE_ : Optional[int] = []
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ ,token_ids_a=snake_case__ ,already_has_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Tuple = None
return state
def __setstate__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : int = load_spm(self.spm_file ,self.sp_model_kwargs )
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Tuple = Path(snake_case__ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
SCREAMING_SNAKE_CASE_ : List[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder ,snake_case__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,snake_case__ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case__ ,'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (str(snake_case__ ), str(snake_case__ ))
def snake_case ( self ,snake_case__ ,snake_case__ = "en" ,snake_case__ = None ,snake_case__ = "ro" ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = src_lang
SCREAMING_SNAKE_CASE_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case__ ,snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,**snake_case__ ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ : Optional[int] = src_lang
SCREAMING_SNAKE_CASE_ : Optional[Any] = self(snake_case__ ,add_special_tokens=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.get_lang_id(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tgt_lang_id
return inputs
def snake_case ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def snake_case ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = self.get_lang_token(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE_ : str = [self.cur_lang_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.eos_token_id]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_lang_token(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.lang_token_to_id[lang_token]
SCREAMING_SNAKE_CASE_ : Any = [self.cur_lang_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.eos_token_id]
def snake_case ( self ,snake_case__ ):
return self.lang_code_to_token[lang]
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_lang_token(snake_case__ )
return self.lang_token_to_id[lang_token]
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path: str ) -> None:
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
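# Typical use of this tokenizer together with its model (a sketch based on
# the published M2M-100 example; downloads the 418M checkpoint):
#
# from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
# model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
# encoded = tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
# generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
# print(tokenizer.batch_decode(generated, skip_special_tokens=True))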
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
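# A minimal sketch of the parsing loop above: the logs are read as one JSON
# object per line (the format pytest's `--report-log` emits), and only
# entries whose "outcome" is "failed" feed the Slack tables. The log file
# name here is made up:
import json

failures = []
with open('nightly_example.log') as f:
    for raw_line in f:
        entry = json.loads(raw_line)
        if entry.get('nodeid', '') and entry.get('outcome', '') == 'failed':
            failures.append((entry['nodeid'], entry.get('duration')))
print(f'{len(failures)} failed test(s) found')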
| 685 | 0 |
from __future__ import annotations
UpperCamelCase__ : List[Any] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = graph
# mapping node to its parent in resulting breadth first tree
SCREAMING_SNAKE_CASE_ : dict[str, str | None] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = source_vertex
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = {self.source_vertex}
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : int = [self.source_vertex] # first in first out queue
while queue:
SCREAMING_SNAKE_CASE_ : Dict = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = vertex
queue.append(snake_case__ )
def snake_case ( self ,snake_case__ ):
if target_vertex == self.source_vertex:
return self.source_vertex
SCREAMING_SNAKE_CASE_ : int = self.parent.get(snake_case__ )
if target_vertex_parent is None:
SCREAMING_SNAKE_CASE_ : List[str] = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + F'->{target_vertex}'
if __name__ == "__main__":
UpperCamelCase__ : Tuple = Graph(graph, '''G''')
g.breadth_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
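# De-obfuscated sketch of the idea above (hypothetical names): breadth-first
# search records each vertex's parent, and the shortest path is read off by
# walking parents back from the target.
from collections import deque

def bfs_parents(graph: dict, source: str) -> dict:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent

def shortest_path(parent: dict, source: str, target: str) -> str:
    if target not in parent:
        raise ValueError(f'No path from vertex: {source} to vertex: {target}')
    path = [target]
    while parent[path[-1]] is not None:
        path.append(parent[path[-1]])
    return '->'.join(reversed(path))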
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
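# Two footnotes on the loop above: for a non-negative int it computes
# exactly `int.bit_length()` (the 1-indexed position of the most
# significant set bit), and a negative input never terminates, because
# Python's arithmetic right shift converges to -1, which stays truthy.
assert (0b100101).bit_length() == 6   # 37 -> MSB at position 6
assert (1).bit_length() == 1
assert (0).bit_length() == 0          # no set bit; the loop also returns 0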
| 685 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCamelCase__ : List[str] = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : int = "rag"
__a : Any = True
def __init__( self ,snake_case__=None ,snake_case__=True ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=" / " ,snake_case__=" // " ,snake_case__=5 ,snake_case__=300 ,snake_case__=768 ,snake_case__=8 ,snake_case__="wiki_dpr" ,snake_case__="train" ,snake_case__="compressed" ,snake_case__=None ,snake_case__=None ,snake_case__=False ,snake_case__=False ,snake_case__=0.0 ,snake_case__=True ,snake_case__=False ,snake_case__=False ,snake_case__=False ,snake_case__=True ,snake_case__=None ,**snake_case__ ,):
super().__init__(
bos_token_id=snake_case__ ,pad_token_id=snake_case__ ,eos_token_id=snake_case__ ,decoder_start_token_id=snake_case__ ,forced_eos_token_id=snake_case__ ,is_encoder_decoder=snake_case__ ,prefix=snake_case__ ,vocab_size=snake_case__ ,**snake_case__ ,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('question_encoder' )
SCREAMING_SNAKE_CASE_ : List[str] = question_encoder_config.pop('model_type' )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('generator' )
SCREAMING_SNAKE_CASE_ : Optional[int] = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE_ : str = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.for_model(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = reduce_loss
SCREAMING_SNAKE_CASE_ : List[str] = label_smoothing
SCREAMING_SNAKE_CASE_ : str = exclude_bos_score
SCREAMING_SNAKE_CASE_ : str = do_marginalize
SCREAMING_SNAKE_CASE_ : Dict = title_sep
SCREAMING_SNAKE_CASE_ : str = doc_sep
SCREAMING_SNAKE_CASE_ : Any = n_docs
SCREAMING_SNAKE_CASE_ : str = max_combined_length
SCREAMING_SNAKE_CASE_ : Tuple = dataset
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dataset_split
SCREAMING_SNAKE_CASE_ : List[Any] = index_name
SCREAMING_SNAKE_CASE_ : Optional[Any] = retrieval_vector_size
SCREAMING_SNAKE_CASE_ : Tuple = retrieval_batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = passages_path
SCREAMING_SNAKE_CASE_ : Dict = index_path
SCREAMING_SNAKE_CASE_ : Any = use_dummy_dataset
SCREAMING_SNAKE_CASE_ : List[Any] = output_retrieved
SCREAMING_SNAKE_CASE_ : Any = do_deduplication
SCREAMING_SNAKE_CASE_ : Dict = use_cache
if self.forced_eos_token_id is None:
SCREAMING_SNAKE_CASE_ : int = getattr(self.generator ,'forced_eos_token_id' ,snake_case__ )
@classmethod
def snake_case ( cls ,snake_case__ ,snake_case__ ,**snake_case__ ):
return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.question_encoder.to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = self.generator.to_dict()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.__class__.model_type
return output
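# A hedged construction sketch (checkpoint names are illustrative, and
# `RagConfig` / `from_question_encoder_generator_configs` name the class
# and classmethod defined -- obfuscated -- above):
from transformers import AutoConfig

question_encoder_config = AutoConfig.from_pretrained(
    'facebook/dpr-question_encoder-single-nq-base')
generator_config = AutoConfig.from_pretrained('facebook/bart-large')
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5)
assert rag_config.to_dict()['generator']['model_type'] == 'bart'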
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
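# De-obfuscated sketch of the circuit above (hypothetical variable names):
# one Hadamard plus a CNOT chain prepares an n-qubit GHZ state, so every
# shot collapses to all zeros or all ones.
import qiskit

def ghz_counts(qubits: int = 3, shots: int = 1000):
    backend = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    circuit.h(0)
    for i in range(1, qubits):
        circuit.cx(i - 1, i)                 # entangle neighbouring qubits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    job = qiskit.execute(circuit, backend, shots=shots)
    return job.result().get_counts(circuit)  # e.g. {'000': ~500, '111': ~500}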
| 685 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "new-model"
if is_tf_available():
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = NewModelConfig
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = 'bert-base-cased'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'bert-base-cased'
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = TFAutoModelForPreTraining.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModelForCausalLM.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelForCausalLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[str] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModelWithLMHead.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModelForMaskedLM.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForMaskedLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForSequenceClassification.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE_ : str = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
@slow
@require_tensorflow_probability
def snake_case ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = TFAutoModelForTableQuestionAnswering.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) ,14410 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertEqual(model.num_parameters() ,14410 )
self.assertEqual(model.num_parameters(only_trainable=snake_case__ ) ,14410 )
def snake_case ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE_ : Any = ['FunnelBaseModel']
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFAutoModel.from_config(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = TFAutoModel.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
def snake_case ( self ):
try:
AutoConfig.register('new-model' ,NewModelConfig )
SCREAMING_SNAKE_CASE_ : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ ,snake_case__ )
auto_class.register(snake_case__ ,snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
auto_class.register(snake_case__ ,snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE_ : List[Any] = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE_ : List[Any] = auto_class.from_config(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = auto_class.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('bert-base' )
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision='aaaaaa' )
def snake_case ( self ):
with self.assertRaisesRegex(
snake_case__ ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,):
SCREAMING_SNAKE_CASE_ : List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def snake_case ( self ):
with self.assertRaisesRegex(snake_case__ ,'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def snake_case ( self ):
# Make sure we have cached the model.
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE_ : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE_ : Dict = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
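# A condensed sketch of the register/round-trip flow the tests above walk
# through. `NewModelConfig` and `TFNewModel` name the two classes defined
# (obfuscated) at the top of this file:
import tempfile
from transformers import AutoConfig, TFAutoModel

AutoConfig.register('new-model', NewModelConfig)   # config first...
TFAutoModel.register(NewModelConfig, TFNewModel)   # ...then the model class

model = TFAutoModel.from_config(NewModelConfig())
with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir)
    reloaded = TFAutoModel.from_pretrained(tmp_dir)  # resolves to TFNewModel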
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(str_n ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
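# De-obfuscated sketch of the two functions above (Project Euler 112): a
# number is "bouncy" when its digits are neither non-decreasing nor
# non-increasing.
def is_bouncy(n: int) -> bool:
    digits = str(n)
    ascending = ''.join(sorted(digits))
    return digits not in (ascending, ascending[::-1])

def least_number_with_ratio(percent: float = 99) -> int:
    bouncy, n = 0, 0
    while True:
        n += 1
        bouncy += is_bouncy(n)
        if bouncy * 100 >= percent * n:   # integer-friendly ratio check
            return n

assert least_number_with_ratio(50) == 538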
| 685 | 0 |
UpperCamelCase__ : str = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCAmelCase ( lowerCamelCase_ : bytes ) -> bytes:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = ''.join(bin(lowerCamelCase_ )[2:].zfill(8 ) for byte in data )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(lowerCamelCase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
SCREAMING_SNAKE_CASE_ : Optional[Any] = b'=' * ((6 - len(lowerCamelCase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCamelCase_ ) % 6)
else:
SCREAMING_SNAKE_CASE_ : int = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCamelCase_ ) , 6 ) ).encode()
+ padding
)
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> bytes:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Dict = (
'argument should be a bytes-like object or ASCII string, '
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(lowerCamelCase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
SCREAMING_SNAKE_CASE_ : int = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCamelCase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
SCREAMING_SNAKE_CASE_ : Dict = encoded_data[:-padding]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''.join(
bin(B64_CHARSET.index(lowerCamelCase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
SCREAMING_SNAKE_CASE_ : Any = ''.join(
bin(B64_CHARSET.index(lowerCamelCase_ ) )[2:].zfill(6 ) for char in encoded_data )
SCREAMING_SNAKE_CASE_ : Tuple = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCamelCase_ ) , 8 )
]
return bytes(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
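# A condensed, de-obfuscated sketch of the encoder above, checked against
# the standard library as the reference implementation (the decoder mirrors
# it in reverse):
import base64

B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

def b64_encode_sketch(data: bytes) -> bytes:
    bits = ''.join(bin(byte)[2:].zfill(8) for byte in data)
    pad = b'=' * ((6 - len(bits) % 6) // 2) if len(bits) % 6 else b''
    bits += '0' * (-len(bits) % 6)        # right-pad to a multiple of 6
    chars = ''.join(
        B64_CHARSET[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    return chars.encode() + pad

for sample in (b'', b'f', b'fo', b'foo', b'Hello World!'):
    assert b64_encode_sketch(sample) == base64.b64encode(sample)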
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
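# A hedged sketch of what `_LazyModule` provides: submodules listed in
# `_import_structure` are only imported on first attribute access, keeping
# the package import itself cheap. Roughly:
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: module
            for module, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):      # only called for unresolved names
        submodule = importlib.import_module(
            f'.{self._attr_to_module[attr]}', self.__name__)
        return getattr(submodule, attr)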
| 685 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[Any]=0.9_9_9 , lowerCamelCase_ : Any="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase_ : Union[str, Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase_ : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ : Any = []
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase_ ) / alpha_bar_fn(lowerCamelCase_ ) , lowerCamelCase_ ) )
return torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
__a : Any = [e.name for e in KarrasDiffusionSchedulers]
__a : Union[str, Any] = 2
@register_to_config
def __init__( self ,snake_case__ = 1000 ,snake_case__ = 0.00085 ,snake_case__ = 0.012 ,snake_case__ = "linear" ,snake_case__ = None ,snake_case__ = "epsilon" ,snake_case__ = "linspace" ,snake_case__ = 0 ,):
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(snake_case__ ,dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ : Tuple = torch.linspace(snake_case__ ,snake_case__ ,snake_case__ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ : Dict = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,snake_case__ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ : int = betas_for_alpha_bar(snake_case__ )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ : Tuple = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ : List[str] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(snake_case__ ,snake_case__ ,snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__=None ):
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.timesteps
SCREAMING_SNAKE_CASE_ : List[str] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ : str = 1 if len(snake_case__ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ : Dict = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
SCREAMING_SNAKE_CASE_ : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case ( self ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Any = self.index_for_timestep(snake_case__ )
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sigmas[step_index]
else:
SCREAMING_SNAKE_CASE_ : Any = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE_ : List[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case ( self ,snake_case__ ,snake_case__ = None ,snake_case__ = None ,):
SCREAMING_SNAKE_CASE_ : int = num_inference_steps
SCREAMING_SNAKE_CASE_ : List[str] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.linspace(0 ,num_train_timesteps - 1 ,snake_case__ ,dtype=snake_case__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ : Any = (np.arange(0 ,snake_case__ ) * step_ratio).round()[::-1].copy().astype(snake_case__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ : int = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ : List[str] = (np.arange(snake_case__ ,0 ,-step_ratio )).round().copy().astype(snake_case__ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.from_numpy(np.log(snake_case__ ) ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = np.interp(snake_case__ ,np.arange(0 ,len(snake_case__ ) ) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(snake_case__ ).to(device=snake_case__ )
# interpolate sigmas
SCREAMING_SNAKE_CASE_ : Any = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ : str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case__ ).startswith('mps' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(snake_case__ ).to(snake_case__ ,dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ : Any = torch.from_numpy(snake_case__ ).to(snake_case__ )
# interpolate timesteps
SCREAMING_SNAKE_CASE_ : str = self.sigma_to_t(snake_case__ ).to(snake_case__ ,dtype=timesteps.dtype )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
SCREAMING_SNAKE_CASE_ : str = torch.cat([timesteps[:1], interleaved_timesteps] )
SCREAMING_SNAKE_CASE_ : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ : List[Any] = defaultdict(snake_case__ )
def snake_case ( self ,snake_case__ ):
# get log sigma
SCREAMING_SNAKE_CASE_ : Any = sigma.log()
# get distribution
SCREAMING_SNAKE_CASE_ : Tuple = log_sigma - self.log_sigmas[:, None]
# get sigmas range
SCREAMING_SNAKE_CASE_ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ : Dict = low_idx + 1
SCREAMING_SNAKE_CASE_ : List[str] = self.log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ : int = self.log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ : Any = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ : Any = w.clamp(0 ,1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ : Any = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ : Union[str, Any] = t.view(sigma.shape )
return t
@property
def snake_case ( self ):
return self.sample is None
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = True ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.index_for_timestep(snake_case__ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ : str = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas_interpol[step_index + 1]
SCREAMING_SNAKE_CASE_ : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
SCREAMING_SNAKE_CASE_ : Any = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ : List[str] = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE_ : str = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ : int = sigma_hat if self.state_in_first_order else sigma_interpol
SCREAMING_SNAKE_CASE_ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ : Dict = sigma_interpol - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
SCREAMING_SNAKE_CASE_ : Tuple = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
SCREAMING_SNAKE_CASE_ : Any = sigma_next - sigma_hat
SCREAMING_SNAKE_CASE_ : Dict = self.sample
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Dict = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
SCREAMING_SNAKE_CASE_ : Dict = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case__ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ : Dict = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ : Optional[int] = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ : List[str] = [self.index_for_timestep(snake_case__ ,snake_case__ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ : List[Any] = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
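# De-obfuscated sketch of the module-level `betas_for_alpha_bar` helper at
# the top of this file (cosine variant): each beta is clipped at `max_beta`
# so the cumulative alpha_bar stays positive and monotone.
import math
import torch

def betas_for_alpha_bar_sketch(num_diffusion_timesteps: int, max_beta: float = 0.999):
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)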
| 715 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
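# Note: the recursion above evaluates actual_power(a, b // 2) twice per
# level, so it costs O(b) multiplications overall. Caching the half-power
# restores the usual O(log b) fast exponentiation (same results):
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half * (a if b % 2 else 1)

assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125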
| 685 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : List[str]=None , ) -> Optional[int]:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : Tuple = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCamelCase_ )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase_ )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=False ,snake_case__=99 ,snake_case__=16 ,snake_case__=2 ,snake_case__=4 ,snake_case__=4 ,snake_case__="relu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=0.0 ,snake_case__=0.0 ,snake_case__=20 ,snake_case__=2 ,snake_case__=1 ,snake_case__=0 ,):
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = seq_length
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = encoder_layerdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = eos_token_id
SCREAMING_SNAKE_CASE_ : Dict = pad_token_id
SCREAMING_SNAKE_CASE_ : List[Any] = bos_token_id
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_mam_aaa_inputs_dict(snake_case__ ,snake_case__ ,snake_case__ )
return config, inputs_dict
def snake_case ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size ,
d_model=self.hidden_size ,
encoder_layers=self.num_hidden_layers ,
decoder_layers=self.num_hidden_layers ,
encoder_attention_heads=self.num_attention_heads ,
decoder_attention_heads=self.num_attention_heads ,
encoder_ffn_dim=self.intermediate_size ,
decoder_ffn_dim=self.intermediate_size ,
dropout=self.hidden_dropout_prob ,
attention_dropout=self.attention_probs_dropout_prob ,
encoder_layerdrop=self.encoder_layerdrop ,
decoder_layerdrop=self.decoder_layerdrop ,
max_position_embeddings=self.max_position_embeddings ,
eos_token_id=self.eos_token_id ,
bos_token_id=self.bos_token_id ,
pad_token_id=self.pad_token_id ,
)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = MaMaaaModel(config=snake_case__ ).get_decoder().to(snake_case__ ).eval()
SCREAMING_SNAKE_CASE_ : Dict = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE_ : int = inputs_dict['attention_mask']
SCREAMING_SNAKE_CASE_ : Tuple = inputs_dict['head_mask']
# first forward pass
SCREAMING_SNAKE_CASE_ : Any = model(snake_case__ ,attention_mask=snake_case__ ,head_mask=snake_case__ ,use_cache=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Any = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(snake_case__ ,attention_mask=snake_case__ )['last_hidden_state']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,past_key_values=snake_case__ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-2 ) )
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = MaMaaaModel(config=snake_case__ ).to(snake_case__ ).eval()
SCREAMING_SNAKE_CASE_ : int = model(**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Dict = model.get_encoder()
encoder.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = MaMaaaEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = encoder(inputs_dict['input_ids'] ,attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Dict = model.get_decoder()
decoder.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = MaMaaaDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decoder(
input_ids=inputs_dict['decoder_input_ids'] ,attention_mask=inputs_dict['decoder_attention_mask'] ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=inputs_dict['attention_mask'] ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__a : Dict = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__a : Optional[int] = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__a : int = True
__a : str = True
__a : Optional[Any] = False
__a : str = False
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE_ : Any = ConfigTester(self ,config_class=snake_case__ )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class.from_pretrained(snake_case__ ,output_loading_info=snake_case__ )
self.assertEqual(info['missing_keys'] ,[] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = copy.deepcopy(self._prepare_for_class(snake_case__ ,snake_case__ ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs['input_ids']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE_ : List[str] = inputs['input_ids']
SCREAMING_SNAKE_CASE_ : List[str] = inputs.get('decoder_input_ids' ,snake_case__ )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = wte(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : List[str] = wte(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = wte(snake_case__ )
with torch.no_grad():
model(**snake_case__ )[0]
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Any = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = MaMaaaForConditionalGeneration(snake_case__ ).eval().to(snake_case__ )
if torch_device == "cuda":
model.half()
model.generate(snake_case__ ,attention_mask=snake_case__ )
model.generate(num_beams=4 ,do_sample=snake_case__ ,early_stopping=snake_case__ ,num_return_sequences=3 )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return torch.tensor(lowerCamelCase_ , dtype=torch.long , device=lowerCamelCase_ )
UpperCamelCase__ : List[str] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case ( self ):
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
    def snake_case ( self ):
        model = MaMaaaModel.from_pretrained('facebook/m2m100_418M').to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def snake_case ( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def snake_case ( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M', src_lang='fr', tgt_lang='en')
        src_fr = [
            'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
            ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors='pt')
        hypotheses_batch = model.generate(
            input_ids=dct['input_ids'].to(torch_device), attention_mask=dct['attention_mask'].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id('en'), )
        expected_en = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True)
        assert generated == expected_en
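    # Illustrative usage sketch (not part of the test suite): the same checkpoint can be used
    # for plain French->English translation. The input sentence and decoding flags below are
    # assumptions for demonstration; only the checkpoint name comes from the tests above.
    #
    #   tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M', src_lang='fr', tgt_lang='en')
    #   model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
    #   batch = tokenizer(['La vie est belle.'], return_tensors='pt')
    #   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id('en'))
    #   print(tokenizer.batch_decode(generated, skip_special_tokens=True))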
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def snake_case ( self ):
self.config_tester.run_common_tests()
    def snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def snake_case ( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case ( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case ( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def snake_case ( self ,scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
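    # Illustrative sketch (not part of the tests): RoPE scaling is enabled on a config exactly
    # as the test above does it; the checkpoint name below is a hypothetical placeholder.
    #
    #   config = LlamaConfig.from_pretrained('some/llama-checkpoint')
    #   config.rope_scaling = {'type': 'dynamic', 'factor': 2.0}
    #   model = LlamaModel(config)  # can now run inputs longer than max_position_embeddings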
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, we will update!' )
    @slow
    def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, we will update!' )
    @slow
    def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, we will update!' )
    @slow
    def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, we will update! Also it is going to be a `too_slow` test' )
    @slow
    def snake_case ( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Model is currently gated' )
    @slow
    def snake_case ( self ):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
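    # Illustrative sketch (not part of the tests): the greedy-generation pattern used above,
    # condensed; the prompt and max_new_tokens are arbitrary, the checkpoint name is from the test.
    #
    #   tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
    #   model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
    #   input_ids = tokenizer.encode('Simply put, the theory of relativity states that ', return_tensors='pt')
    #   print(tokenizer.decode(model.generate(input_ids, max_new_tokens=32)[0], skip_special_tokens=True))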
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
class MaskFormerSwinConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
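# Illustrative usage sketch (the out_features value is an arbitrary example):
#
#   config = MaskFormerSwinConfig(out_features=['stage1', 'stage4'])
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   config.out_features  # ['stage1', 'stage4']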
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
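# For reference, the vocab.txt format this parser expects (inferred from the code above, not
# from official documentation): each line is either a single token or a comma-separated group
# of surface forms that all map to the same token id, e.g.
#
#   こんにちは
#   ウェブ,ウエブ   <- both spellings share one id; the joined group is the key in raw_vocab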
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
@property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab ,**self.added_tokens_encoder )
    def _tokenize( self ,text ):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
    def _convert_token_to_id( self ,token ):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token( self ,index ):
        return self.subword_tokenizer.convert_id_to_token(index)
    def convert_tokens_to_string( self ,tokens ):
        out_string = ''.join(tokens).strip()
        return out_string
    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
    def __len__( self ):
        return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checkuae(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
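    # Illustrative note (inferred from the loop above): an out-of-vocabulary character falls
    # back to its UTF-8 bytes, e.g. 'あ' (0xE3 0x81 0x82) would be emitted as
    # ['<|byte227|>', '<|byte129|>', '<|byte130|>'].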
    def convert_id_to_token( self ,index ,breakline='\n' ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
| 685 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=RegNetConfig ,has_text_modality=False )
def snake_case ( self ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def snake_case ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 ,reason='TF does not support backprop for grouped convolutions on CPU.' ,)
@slow
def snake_case ( self ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def snake_case ( self ):
pass
def snake_case ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def snake_case ( self ):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) ,expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
def snake_case ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={} ):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object ,(List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object ,dict_object ):
                        recursive_check(tuple_iterable_value ,dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object ,dict_object ) ) ,msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
                        ) ,)
            recursive_check(tuple_output ,dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True} )
    def snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def snake_case ( self ):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='tf' )
        # forward pass
        outputs = model(**inputs ,training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 )
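    # Illustrative follow-up (not part of the test): mapping the verified logits to a class
    # name, assuming the checkpoint ships the usual id2label table.
    #
    #   predicted_id = int(tf.math.argmax(outputs.logits, axis=-1)[0])
    #   print(model.config.id2label[predicted_id])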
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function, using several processes if possible in the current environment."""
    in_kaggle = False
    in_colab = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.' )
        else:
            print('Launching training on one CPU.' )
        function(*args )
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException
            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.' )
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.' )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.0.1' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.' ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
                print('Launching training on MPS.' )
            elif torch.cuda.is_available():
                print('Launching training on one GPU.' )
            else:
                print('Launching training on CPU.' )
            function(*args )
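# Illustrative usage sketch (the training function and its argument are placeholders):
#
#   def training_loop(learning_rate):
#       ...  # build the Accelerator, model and dataloaders inside this function
#
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2)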
def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function on `num_processes` CPU subprocesses for debugging purposes."""
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
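# Similarly (illustrative), debug_launcher runs the same kind of function on CPU subprocesses:
#
#   debug_launcher(training_loop, args=(1e-4,), num_processes=2)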
| 685 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
            json.dump(image_processor_map ,fp )
    def get_tokenizer( self ,**kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_image_processor( self ,**kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow ,image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast ,image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer ,BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor ,ChineseCLIPImageProcessor )
    def snake_case ( self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)' ,sep_token='(SEP)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname ,cls_token='(CLS)' ,sep_token='(SEP)' ,do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,ChineseCLIPImageProcessor )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(snake_case__ ,return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Tuple = processor(images=snake_case__ ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : str = processor(text=snake_case__ ,images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=snake_case__ ,image_processor=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Alexandra,T-shirt的价格是15便士。'
SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = processor(text=snake_case__ ,images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
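# The tests above pin down the processor contract: text routes to the tokenizer,
# images route to the image processor, passing neither raises, and the merged
# output exposes input_ids/token_type_ids/attention_mask plus pixel_values.
# Minimal sketch of that composition pattern (an illustrative toy, not the real
# ChineseCLIPProcessor, which adds tensor conversion and validation):
class _ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images.')
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))  # input_ids, attention_mask, ...
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))  # pixel_values
        return outputs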
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
__a : List[str] = "pixel_values"
__a : Tuple = False
__a : Tuple = TimmBackboneConfig
def __init__( self ,snake_case__ ,**snake_case__ ):
requires_backends(self ,'timm' )
super().__init__(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(snake_case__ ,'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
SCREAMING_SNAKE_CASE_ : int = getattr(snake_case__ ,'use_pretrained_backbone' ,snake_case__ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
SCREAMING_SNAKE_CASE_ : Optional[int] = config.out_indices if getattr(snake_case__ ,'out_indices' ,snake_case__ ) is not None else (-1,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = timm.create_model(
config.backbone ,pretrained=snake_case__ ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=snake_case__ ,**snake_case__ ,)
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
SCREAMING_SNAKE_CASE_ : str = self._backbone.return_layers
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(snake_case__ )
@classmethod
def snake_case ( cls ,snake_case__ ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('config' ,TimmBackboneConfig() )
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('use_timm_backbone' ,snake_case__ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('num_channels' ,config.num_channels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('features_only' ,config.features_only )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('use_pretrained_backbone' ,config.use_pretrained_backbone )
SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('out_indices' ,config.out_indices )
SCREAMING_SNAKE_CASE_ : List[str] = TimmBackboneConfig(
backbone=snake_case__ ,num_channels=snake_case__ ,features_only=snake_case__ ,use_pretrained_backbone=snake_case__ ,out_indices=snake_case__ ,)
return super()._from_config(snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
pass
def snake_case ( self ,snake_case__ ,snake_case__=None ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
SCREAMING_SNAKE_CASE_ : List[Any] = self._all_layers
SCREAMING_SNAKE_CASE_ : Tuple = self._backbone(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = self._return_layers
SCREAMING_SNAKE_CASE_ : int = tuple(hidden_states[i] for i in self.out_indices )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._backbone(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Dict = tuple(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(snake_case__ ) if hidden_states is not None else None
if not return_dict:
SCREAMING_SNAKE_CASE_ : Tuple = (feature_maps,)
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=snake_case__ ,hidden_states=snake_case__ ,attentions=snake_case__ )
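# Usage sketch (the class above is this corpus's renamed copy of transformers'
# TimmBackbone; under its real name, and with torch + timm installed, the flow is):
#
#   from transformers import TimmBackbone, TimmBackboneConfig
#   import torch
#
#   config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   # one feature map per entry in config.out_indices (the final stage by default)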
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
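# Why 'lower' tokenizes to ['lo', 'w', 'er</w>'] under the toy vocab/merges in
# setUp: BPE appends '</w>' to the last character of a word and then applies the
# merge rules in rank order. Minimal stand-alone illustration (not the real CLIP
# BPE, which also performs unicode normalization and caching):
def _toy_bpe(word, merges=(('l', 'o'), ('lo', 'w</w>'), ('e', 'r</w>'))):
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    for first, second in merges:  # merges are tried in priority (rank) order
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == first and symbols[i + 1] == second:
                symbols[i:i + 2] = [first + second]
            else:
                i += 1
    return symbols

assert _toy_bpe('lower') == ['lo', 'w', 'er</w>']
assert _toy_bpe('newer') == ['n', 'e', 'w', 'er</w>']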
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
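# Quick reference: under its real name (VisualBertConfig) the class above is
# created like any PretrainedConfig, e.g.
#
#   from transformers import VisualBertConfig
#   config = VisualBertConfig(visual_embedding_dim=1024, bypass_transformer=True)
#
# visual_embedding_dim must match the dimensionality of the visual features the
# model will be fed; the remaining arguments mirror standard BERT hyperparameters.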
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
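# Each command registered above follows the same shape: `register_subcommand`
# attaches a sub-parser and uses `set_defaults(func=...)` so that `args.func(args)`
# builds the command object whose `run()` does the work. A minimal, hypothetical
# command illustrating the pattern (not part of the real CLI):
class HelloCommand:
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser('hello', help='print a greeting')
        hello_parser.add_argument('--name', default='world')
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self._name = name

    def run(self):
        print(f'Hello, {self._name}!')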
if __name__ == "__main__":
main()
| 685 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
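# The parametrization above encodes this truth table: a dataset counts as 'small'
# only when its size is known, the in-memory cap is non-zero, and the size is
# strictly under the cap. Self-contained restatement of the expected logic:
def _expected_is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert _expected_is_small(4_00 * 2**20, 6_00 * 2**20) is True
assert _expected_is_small(6_00 * 2**20, 4_00 * 2**20) is False
assert _expected_is_small(None, 9_00 * 2**20) is False
assert _expected_is_small(4_00 * 2**20, 0) is False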
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
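# End-to-end sketch (the functions above are this corpus's renamed copies of
# diffusers' dynamic-module utilities; under their real names, and noting that
# the exact import path may differ across diffusers versions, the flow is):
#
#   from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module
#   # downloads examples/community/<name>.py for the matching diffusers revision,
#   # verifies its imports, caches it under HF_MODULES_CACHE, and returns the one
#   # DiffusionPipeline subclass defined in the file (needs network access):
#   cls = get_class_from_dynamic_module('clip_guided_stable_diffusion', 'clip_guided_stable_diffusion.py')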
| 685 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCamelCase__ : Tuple = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
UpperCamelCase__ : Dict = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
UpperCamelCase__ : Optional[Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
SCREAMING_SNAKE_CASE_ : List[str] = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def snake_case ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
def snake_case ( self ,snake_case__ ,snake_case__ ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(snake_case__ ,snake_case__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(snake_case__ ,snake_case__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(snake_case__ ,snake_case__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(snake_case__ ,snake_case__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 701 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
| 685 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = 13
SCREAMING_SNAKE_CASE_ : Any = 7
SCREAMING_SNAKE_CASE_ : Tuple = 30
SCREAMING_SNAKE_CASE_ : Optional[int] = self.seq_length + self.mem_len
SCREAMING_SNAKE_CASE_ : Any = 15
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = 99
SCREAMING_SNAKE_CASE_ : int = [10, 50, 80]
SCREAMING_SNAKE_CASE_ : List[Any] = 32
SCREAMING_SNAKE_CASE_ : Optional[Any] = 32
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : List[str] = 8
SCREAMING_SNAKE_CASE_ : Tuple = 128
SCREAMING_SNAKE_CASE_ : str = 2
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.vocab_size - 1
SCREAMING_SNAKE_CASE_ : List[Any] = 0.01
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = TFTransfoXLModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : str = {'input_ids': input_ids_a, 'mems': mems_a}
SCREAMING_SNAKE_CASE_ : Optional[int] = model(snake_case__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = TFTransfoXLLMHeadModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids_a, 'labels': lm_labels}
SCREAMING_SNAKE_CASE_ : str = model(snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : List[Any] = model([input_ids_a, mems_a] ).to_tuple()
SCREAMING_SNAKE_CASE_ : Any = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLForSequenceClassification(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
(config, input_ids_a, input_ids_a, lm_labels) = config_and_inputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : int = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__a : List[str] = () if is_tf_available() else ()
__a : Optional[Any] = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__a : int = False
__a : Optional[Any] = False
__a : List[str] = False
__a : str = False
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self ,config_class=snake_case__ ,d_embed=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*snake_case__ )
def snake_case ( self ):
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(snake_case__ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
SCREAMING_SNAKE_CASE_ : Any = model.get_output_embeddings()
assert isinstance(snake_case__ ,tf.keras.layers.Layer )
SCREAMING_SNAKE_CASE_ : Tuple = model.get_bias()
assert name is None
else:
SCREAMING_SNAKE_CASE_ : str = model.get_output_embeddings()
assert x is None
SCREAMING_SNAKE_CASE_ : List[str] = model.get_bias()
assert name is None
def snake_case ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def snake_case ( self ):
pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
SCREAMING_SNAKE_CASE_ : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
SCREAMING_SNAKE_CASE_ : str = model.generate(snake_case__ ,max_length=200 ,do_sample=snake_case__ )
self.assertListEqual(output_ids[0].numpy().tolist() ,snake_case__ )
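# Shape-level sketch of the `mems` recurrence the assertions above check: each
# forward pass returns the last `mem_len` hidden states per layer, which are fed
# back (detached) as extra context for the next segment. Illustrative numpy only:
if __name__ == "__main__":
    import numpy as np

    mem_len, batch_size, hidden_size = 30, 13, 32  # matches the tester defaults above
    mems = np.zeros((mem_len, batch_size, hidden_size))
    segment_hidden = np.random.randn(7, batch_size, hidden_size)  # seq_length == 7
    mems = np.concatenate([mems, segment_hidden])[-mem_len:]      # keep newest mem_len steps
    assert mems.shape == (mem_len, batch_size, hidden_size)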
| 702 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
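# Independent, vectorized cross-check of the pi/4 area argument used above
# (a sketch; assumes numpy is installed):
def numpy_pi_estimator(num_samples: int = 100_000, seed: int = 0) -> float:
    import numpy as np

    rng = np.random.default_rng(seed)
    xy = rng.uniform(-1.0, 1.0, size=(num_samples, 2))
    # fraction of uniform points in [-1, 1]^2 that land inside the unit circle ~ pi/4
    return 4.0 * float(np.mean((xy**2).sum(axis=1) <= 1.0))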
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Tuple = "rwkv"
__a : int = {"max_position_embeddings": "context_length"}
def __init__( self ,snake_case__=50277 ,snake_case__=1024 ,snake_case__=4096 ,snake_case__=32 ,snake_case__=None ,snake_case__=None ,snake_case__=1E-5 ,snake_case__=0 ,snake_case__=0 ,snake_case__=6 ,snake_case__=False ,snake_case__=True ,**snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Any = context_length
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : Any = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Tuple = rescale_every
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE_ : Any = bos_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
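# Quick reference: under its real name (RwkvConfig) the class above is built
# like any PretrainedConfig; unset sizes fall back as coded above, e.g.
#
#   from transformers import RwkvConfig
#   config = RwkvConfig(hidden_size=1024, num_hidden_layers=24)
#   # attention_hidden_size defaults to hidden_size (1024) and
#   # intermediate_size to 4 * hidden_size (4096) when left as None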
| 703 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 0 |
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
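# Worked check (hand-computed, not from the original source): with stress=0,
# tangential_force=25 and area=0.05 the function returns ("stress", 500.0),
# since tau = F / A = 25 / 0.05 = 500.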
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
| 685 | 0 |
from __future__ import annotations
from collections.abc import Generator
def __UpperCAmelCase ( ) -> Generator[int, None, None]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[int, int] = {}
SCREAMING_SNAKE_CASE_ : Any = 2
while True:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = factor_map.pop(lowerCamelCase_ , lowerCamelCase_ )
if factor:
SCREAMING_SNAKE_CASE_ : Tuple = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ : List[str] = factor
else:
SCREAMING_SNAKE_CASE_ : List[str] = prime
yield prime
prime += 1
def __UpperCAmelCase ( lowerCamelCase_ : float = 1E10 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = sieve()
SCREAMING_SNAKE_CASE_ : List[Any] = 1
while True:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = next(lowerCamelCase_ )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime, as the remainder will be 2.
next(lowerCamelCase_ )
n += 2
if __name__ == "__main__":
print(solution())
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
| 685 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase__ : Optional[int] = logging.getLogger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[Any] = "masked_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=0 ,snake_case__="topK" ,snake_case__="constant" ,snake_case__=0.0 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = hidden_act
SCREAMING_SNAKE_CASE_ : str = intermediate_size
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : int = layer_norm_eps
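        # Pruning-specific knobs (the naming suggests Movement Pruning-style
        # masked layers): 'topK' presumably keeps only the highest-scoring
        # weights, while mask_init/mask_scale control how the learned score
        # masks are initialized.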
SCREAMING_SNAKE_CASE_ : Optional[Any] = pruning_method
SCREAMING_SNAKE_CASE_ : str = mask_init
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_scale
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase__ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCamelCase__ : str = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCamelCase__ : int = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : bool = False ) -> List[Any]:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[str] = f.read()
SCREAMING_SNAKE_CASE_ : List[Any] = content.split('\n' )
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : str = 0
while line_idx < len(lowerCamelCase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(re.search(R'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
SCREAMING_SNAKE_CASE_ : Any = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
SCREAMING_SNAKE_CASE_ : Tuple = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
SCREAMING_SNAKE_CASE_ : str = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : _re_identifier.search(lowerCamelCase_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(lowerCamelCase_ ) )
elif "\n".join(lowerCamelCase_ ) != content:
return True
def __UpperCAmelCase ( lowerCamelCase_ : bool = False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for f in os.listdir(lowerCamelCase_ ) if f.endswith('.py' )]
SCREAMING_SNAKE_CASE_ : int = [sort_auto_mapping(lowerCamelCase_ , overwrite=lowerCamelCase_ ) for fname in fnames]
if not overwrite and any(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = [f for f, d in zip(lowerCamelCase_ , lowerCamelCase_ ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowerCamelCase_ )}. Run `make style` to fix'
' this.' )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCamelCase__ : Tuple = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 707 |
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
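        # One FIFO list per priority level: index 0 is highest, 2 is lowest.
        # enqueue appends to the matching list (capped at 100 items); dequeue
        # pops from the first non-empty list.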
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=30 ,snake_case__=2 ,snake_case__=3 ,snake_case__=True ,snake_case__=True ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=10 ,snake_case__=0.02 ,snake_case__=None ,snake_case__=2 ,):
SCREAMING_SNAKE_CASE_ : Tuple = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = image_size
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = hidden_size
SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : str = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : str = scope
SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : Tuple = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : Tuple = num_patches + 1
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = ViTModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = ViTForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : List[str] = ViTForMaskedImageModeling(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = ViTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : List[str] = ViTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__a : Optional[int] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
__a : Dict = True
__a : Union[str, Any] = False
__a : str = False
__a : Any = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ViTModelTester(self )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConfigTester(self ,config_class=snake_case__ ,has_text_modality=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case ( self ):
pass
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ ,nn.Linear ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def snake_case ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = ViTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case ( self ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.default_image_processor
SCREAMING_SNAKE_CASE_ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(images=snake_case__ ,return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,snake_case__ ,atol=1E-4 ) )
@slow
def snake_case ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_ : Tuple = ViTModel.from_pretrained('facebook/dino-vits8' ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' ,size=480 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(images=snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Dict = inputs.pixel_values.to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,interpolate_pos_encoding=snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE_ : str = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,snake_case__ ,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = ViTModel.from_pretrained('facebook/dino-vits8' ,torch_dtype=torch.floataa ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Any = prepare_img()
SCREAMING_SNAKE_CASE_ : Any = image_processor(images=snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = inputs.pixel_values.to(snake_case__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ )
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ : List[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
inspect_dataset(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
inspect_metric(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ) -> int:
"""simple docstring"""
with pytest.raises(lowerCamelCase_ ):
get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_dataset_config_names(lowerCamelCase_ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_dataset_infos(lowerCamelCase_ )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE_ : List[Any] = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_dataset_infos(lowerCamelCase_ )
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : Optional[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(lowerCamelCase_ ):
get_dataset_split_names(lowerCamelCase_ , config_name=lowerCamelCase_ )
| 709 |
from ..utils import DummyObject, requires_backends
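# Dummy placeholders: each class below simply calls requires_backends so that
# importing a Flax object without the 'flax' backend raises a helpful error.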
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase__ : Any = '''true'''
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=82 , lowerCamelCase_ : List[Any]=16 ) -> Optional[Any]:
"""simple docstring"""
set_seed(42 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = RegressionModel()
SCREAMING_SNAKE_CASE_ : str = deepcopy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = RegressionDataset(length=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
return model, ddp_model, dataloader
def __UpperCAmelCase ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : Any=False ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
SCREAMING_SNAKE_CASE_ : Tuple = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(lowerCamelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : Dict = dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase_ : int ):
if use_longest:
return tokenizer.pad(lowerCamelCase_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(lowerCamelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return DataLoader(lowerCamelCase_ , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=16 )
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator(dispatch_batches=lowerCamelCase_ , split_batches=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = get_dataloader(lowerCamelCase_ , not dispatch_batches )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for batch in dataloader:
SCREAMING_SNAKE_CASE_ : int = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE_ : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase_ )
targs.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = torch.cat(lowerCamelCase_ ), torch.cat(lowerCamelCase_ )
return logits, targs
def __UpperCAmelCase ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : List[str]=82 , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : int=16 ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_basic_setup(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = generate_predictions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
assert (
len(lowerCamelCase_ ) == num_samples
), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase_ )}'
def __UpperCAmelCase ( lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = evaluate.load('glue' , 'mrpc' )
SCREAMING_SNAKE_CASE_ : List[Any] = get_mrpc_setup(lowerCamelCase_ , lowerCamelCase_ )
# First do baseline
SCREAMING_SNAKE_CASE_ : Union[str, Any] = setup['no']
model.to(lowerCamelCase_ )
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase_ )
with torch.inference_mode():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCamelCase_ , references=batch['labels'] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE_ : List[str] = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ : str = batch['labels']
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCamelCase_ , references=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator(split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower, so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(lowerCamelCase_ , lowerCamelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator(split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ )
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(lowerCamelCase_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
test_torch_metrics(lowerCamelCase_ , 5_12 )
accelerator.state._reset_state()
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
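# Each *.log file is assumed to contain one JSON record per line (pytest
# report-log style); failures are tallied per test and per log file below.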
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
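# Illustration (added; the exact rendering is approximate): the TableFormat defined at
# the top emits GitHub-style pipe rows with all ruler lines suppressed, so
#   tabulate([["tests/foo.py", 2]], headers=["Test Location", "Num Failed"],
#            tablefmt=hf_table_format, stralign="right")
# yields something like:
#    Test Location | Num Failed |
#     tests/foo.py |          2 |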
| 685 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(snake_case__ ) ,torch_builtin(snake_case__ ) ) )
self.assertFalse(torch.allclose(gelu_python(snake_case__ ) ,gelu_new(snake_case__ ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_activation('gelu' )
SCREAMING_SNAKE_CASE_ : List[Any] = get_activation('gelu_10' )
SCREAMING_SNAKE_CASE_ : Dict = torch_builtin(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = geluaa(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.where(y_gelu_aa < 10.0 ,1 ,0 )
self.assertTrue(torch.max(snake_case__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask ,y_gelu_aa * clipped_mask ) )
def snake_case ( self ):
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(snake_case__ ):
get_activation('bogus' )
with self.assertRaises(snake_case__ ):
get_activation(snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = get_activation('gelu' )
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : str = get_activation('gelu' )
self.assertEqual(acta.a ,1 )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = acta.a
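# Background (added note): GELU(x) = x * Phi(x), where Phi is the standard normal CDF.
# gelu_python computes the exact erf form 0.5 * x * (1 + erf(x / sqrt(2))), while
# gelu_new uses the tanh approximation 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).
# The two agree closely near the origin but not to allclose tolerance, which is why the
# first test asserts closeness to torch's builtin yet asserts divergence from gelu_new.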
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
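# Minimal sanity sketch (added): for non-negative ints the function above matches
# int.bit_length(), i.e. the 1-based position of the most significant bit.
assert __UpperCAmelCase(0) == 0
assert __UpperCAmelCase(1) == 1
assert __UpperCAmelCase(8) == 4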
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
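# Usage sketch (added; written with the intended descriptive name, since the obfuscated
# defs in this file share one name): for a file containing "from .utils import helper"
# and "from .ops import op", get_relative_imports returns ["utils", "ops"] in arbitrary
# order, because duplicates are dropped through a set.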
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
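# Usage sketch (added): check_imports("pipeline.py") raises ImportError naming every
# top-level package the file imports that is missing from the environment, and on
# success returns the file's *relative* imports so the caller can copy them along.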
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
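# Usage sketch (added; repo and file names are hypothetical):
# pipeline_cls = get_class_from_dynamic_module(
#     "hf-internal-testing/diffusers-dummy-pipeline", "pipeline.py"
# )
# This downloads/caches pipeline.py plus its relative imports, then returns the single
# DiffusionPipeline subclass found in it (via find_pipeline_class when no class_name
# is given).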
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
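# Worked example (added; using the intended name since the obfuscated defs in this
# file collide): check_bouncy(538) is True because neither "358" (sorted ascending)
# nor "853" (sorted descending) equals "538"; check_bouncy(1234) is False, since the
# digits are already in ascending order.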
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values strictly between 0 and 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
SCREAMING_SNAKE_CASE_ : Optional[int] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(snake_case__ ) )
]
SCREAMING_SNAKE_CASE_ : Any = defaultdict(snake_case__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
SCREAMING_SNAKE_CASE_ : List[str] = (1 << len(snake_case__ )) - 1
def snake_case ( self ,snake_case__ ,snake_case__ ):
# if mask == self.final_mask, every person has been assigned a task; count one way
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't include this task in the arrangement
SCREAMING_SNAKE_CASE_ : List[Any] = self.count_ways_until(snake_case__ ,task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
# save the value.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = total_ways_util
return self.dp[mask][task_no]
def snake_case ( self ,snake_case__ ):
# Store the list of persons for each task
for i in range(len(snake_case__ ) ):
for j in task_performed[i]:
self.task[j].append(snake_case__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 ,1 )
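# Complexity note (added): the memo table holds 2**M * (N + 1) states for M persons
# and N tasks, and each state scans at most M candidate persons, so the DP runs in
# O(2**M * N * M) time and O(2**M * N) space.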
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
UpperCamelCase__ : Dict = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 715 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
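# Note (added): actual_power above recomputes the half power twice at every level,
# so it performs O(b) multiplications overall. A standard iterative square-and-multiply
# sketch that reuses the squared base runs in O(log b):
def fast_power(base: float, exp: int) -> float:
    if exp < 0:
        return 1 / fast_power(base, -exp)
    result = 1.0
    while exp:
        if exp & 1:  # odd exponent: fold the current base into the result
            result *= base
        base *= base  # square the base
        exp >>= 1  # halve the exponent
    return result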
if __name__ == "__main__":
print(power(-2, -3))
| 685 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileBertConfig.from_json_file(lowerCamelCase_ )
print(F'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ : Tuple = MobileBertForPreTraining(lowerCamelCase_ )
# Load weights from tf checkpoint
SCREAMING_SNAKE_CASE_ : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ,snake_case__=13 ,snake_case__=7 ,snake_case__=True ,snake_case__=True ,snake_case__=False ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=5 ,snake_case__=4 ,snake_case__=37 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=3 ,snake_case__=4 ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[str] = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,):
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,use_cache=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : str = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to next input_ids and to the attention mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE_ : Dict = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
SCREAMING_SNAKE_CASE_ : Tuple = model(
snake_case__ ,attention_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,past_key_values=snake_case__ ,output_hidden_states=snake_case__ ,)['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
__a : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a : int = (LlamaForCausalLM,) if is_torch_available() else ()
__a : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = False
__a : Tuple = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self ,config_class=snake_case__ ,hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : str = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Any = 3
SCREAMING_SNAKE_CASE_ : int = 'single_label_classification'
SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : str = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = LlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(snake_case__ ,attention_mask=snake_case__ ,labels=snake_case__ )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def snake_case ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : Any = LlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE_ : int = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : List[Any] = original_model(snake_case__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ : List[Any] = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ : int = LlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ : str = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE_ : Optional[int] = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ ,snake_case__ ,atol=1E-5 ) )
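# Note (added): "linear" RoPE scaling divides position ids by the factor at every
# length, so even short inputs differ from the unscaled model, whereas "dynamic" NTK
# scaling only adjusts the rotary base once the input exceeds the original
# max_position_embeddings -- hence the branch above.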
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Dict = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : Tuple = model(torch.tensor(snake_case__ ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
SCREAMING_SNAKE_CASE_ : str = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
SCREAMING_SNAKE_CASE_ : int = model(torch.tensor(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,snake_case__ ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,snake_case__ ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ : List[str] = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ : str = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(snake_case__ ,return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=snake_case__ )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.generate(snake_case__ ,max_new_tokens=64 ,top_p=snake_case__ ,temperature=1 ,do_sample=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(generated_ids[0] ,skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ ,snake_case__ )
| 685 | 0 |