"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class __snake_case ( A__ ):
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = '''ctrl'''
lowerCAmelCase_ : str = ['''past_key_values''']
lowerCAmelCase_ : Optional[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Union[str, Any] , UpperCamelCase__ :List[str]=246_534 , UpperCamelCase__ :Tuple=256 , UpperCamelCase__ :Any=1_280 , UpperCamelCase__ :Dict=8_192 , UpperCamelCase__ :Dict=48 , UpperCamelCase__ :List[str]=16 , UpperCamelCase__ :Dict=0.1 , UpperCamelCase__ :Union[str, Any]=0.1 , UpperCamelCase__ :Optional[Any]=1E-6 , UpperCamelCase__ :Dict=0.02 , UpperCamelCase__ :List[Any]=True , **UpperCamelCase__ :Tuple , ):
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = dff
_a = resid_pdrop
_a = embd_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = use_cache
super().__init__(**UpperCamelCase__ )
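
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, kept as comments because this module uses relative
# imports, showing how `attribute_map` lets the config answer generic
# queries even though CTRL stores the value under a model-specific name:
#
#     from transformers import CTRLConfig
#
#     config = CTRLConfig(n_embd=512, n_layer=24)
#     assert config.hidden_size == 512        # resolved via attribute_map to n_embd
#     assert config.num_hidden_layers == 24   # resolved via attribute_map to n_layer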
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
return min_val if option else max_val
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int((number_a + number_a) / 2)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be(lower > higher)')
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value')
def answer(_UpperCAmelCase) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...')
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase)
last_numbers.append(_UpperCAmelCase)
if answer(_UpperCAmelCase) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(_UpperCAmelCase) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''')
print(F'''details : {last_numbers!s}''')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip())
guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
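
# --- Illustrative trace (not part of the original file) ---
# Because the search only ever halves integer intervals, a run is fully
# reproducible; guessing 17 between 10 and 1000 should visit the midpoints
# 505, 257, 133, 71, 40, 25 and then land on 17:
#
#     guess_the_number(10, 1000, 17)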
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name}` value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name}` value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
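
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how model test suites typically wire this helper up;
# the concrete config class below is an arbitrary example, not mandated by
# this file:
#
#     import unittest
#     from transformers import BertConfig
#
#     class BertConfigTest(unittest.TestCase):
#         def test_config(self):
#             ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()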
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the A* algorithm (manhattan or euclidean)."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the reachable, in-bounds, obstacle-free neighbours of a node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
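
# --- Design note (not part of the original file) ---
# Sorting the whole open list on every iteration costs O(n log n) per pop.
# A common refinement, sketched here under the assumption that Node.__lt__
# stays as defined above, is to keep the open list as a binary heap:
#
#     import heapq
#
#     heapq.heapify(self.open_nodes)                   # once, after construction
#     current_node = heapq.heappop(self.open_nodes)    # O(log n) instead of a full sort
#     heapq.heappush(self.open_nodes, child_node)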
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images backed by random NumPy data."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
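
# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of the processor outside the test harness; the checkpoint
# name is the public CLIPSeg release and `image` stands in for any PIL image:
#
#     from transformers import CLIPSegProcessor
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     batch = processor(text=["a cat"], images=[image], return_tensors="pt")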
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # map deprecated `no_*` flags onto their positive counterparts
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
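
# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of driving the benchmark with these arguments; the model
# name, batch size, and sequence length are arbitrary placeholders:
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     PyTorchBenchmark(args).run()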
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""GPTNeoX model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
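
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of the rope_scaling validation above; the numbers are
# arbitrary examples, not recommended settings:
#
#     GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # accepted
#     GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})    # raises ValueError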
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def snake_case ( )-> int:
'''simple docstring'''
lowerCamelCase__ = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ = g.get_repo('huggingface/diffusers' )
lowerCamelCase__ = repo.get_issues(state='open' )
for issue in open_issues:
lowerCamelCase__ = sorted(issue.get_comments() , key=lambda _a : i.created_at , reverse=_a )
lowerCamelCase__ = comments[0] if len(_a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : int=64 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Any=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def _UpperCamelCase ( self : Dict ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[Any] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = MPNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = MPNetForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MPNetForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = MPNetForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MPNetForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Any = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
a_ : Optional[Any] = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] = False
a_ : Any = True
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = MPNetModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@require_torch
class _a ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = MPNetModel.from_pretrained('microsoft/mpnet-base' )
lowerCamelCase__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return the Liouville lambda of a positive integer: 1 if the number of
    prime factors (counted with multiplicity) is even, -1 if it is odd.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
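
# --- Illustrative values (not part of the original file) ---
# Assuming maths.prime_factors counts factors with multiplicity (as in
# TheAlgorithms), 10 = 2 * 5 has two factors and 12 = 2 * 2 * 3 has three:
#
#     liouville_lambda(10)  # 1
#     liouville_lambda(12)  # -1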
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid, c = sqrt(K / rho), where K is
    the bulk modulus of the fluid and rho its density.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
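
# --- Illustrative values (not part of the original file) ---
# Rough textbook figures, quoted as approximations: water at about 20 C has
# a density near 998 kg/m^3 and a bulk modulus near 2.15e9 Pa, giving a
# speed of sound of roughly 1.47e3 m/s:
#
#     speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)  # ~1467.7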
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale a list of values to the range [0, 1] via min-max scaling."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Standardize a list of values to zero mean and unit sample variance."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
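
# --- Illustrative values (not part of the original file) ---
# Min-max scaling maps the extremes to 0 and 1; standardization centers on
# the sample mean and divides by the sample standard deviation:
#
#     normalization([2, 4, 6])     # [0.0, 0.5, 1.0]
#     standardization([2, 4, 6])   # [-1.0, 0.0, 1.0]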
'''simple docstring'''
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_UpperCamelCase = get_logger(__name__)
class lowerCamelCase__ :
'''simple docstring'''
A__ = '''dummy_data'''
A__ = '''datasets'''
A__ = False
def __init__( self : Dict , __A : str , __A : str , __A : Union[Version, str] , __A : Optional[str] = None , __A : bool = False , __A : bool = True , __A : Optional[List[Callable]] = None , ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = dataset_name
lowerCAmelCase__ = cache_dir
lowerCAmelCase__ = use_local_dummy_data
lowerCAmelCase__ = config
# download_callbacks take a single url as input
lowerCAmelCase__ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCAmelCase__ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCAmelCase__ = str(__A )
# to be downloaded
lowerCAmelCase__ = None
lowerCAmelCase__ = None
@property
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
lowerCAmelCase__ = self.download_dummy_data()
return self._dummy_file
@property
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase__ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase__ = cached_path(
__A , cache_dir=self.cache_dir , extract_compressed_file=__A , force_extract=__A )
return os.path.join(__A , self.dummy_file_name )
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
if self._bucket_url is None:
lowerCAmelCase__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowercase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowercase__ ( self : Optional[Any] , __A : Any , *__A : Dict ) -> Any:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase__ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase__ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__A , __A ):
return self.create_dummy_data_dict(__A , __A )
elif isinstance(__A , (list, tuple) ):
return self.create_dummy_data_list(__A , __A )
else:
        return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
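
# A minimal, illustrative helper (the name `dummy_local_name` is hypothetical and
# not part of the class above) showing the URL-to-filename mapping the dummy-data
# methods rely on: the last path component is quote_plus-encoded so URLs with
# query arguments still map to a valid local file name.
def dummy_local_name(path_to_dummy_data: str, url: str) -> str:
    # keep only the final path segment and percent-encode it
    return os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(url).name))


# e.g. dummy_local_name("dummy", "https://example.com/data/train.csv?version=2")
# -> "dummy/train.csv%3Fversion%3D2" on POSIX systems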
| 712 |
"""Convert an Excel-style column title (e.g. "AB") to its column number."""


def excel_column_to_number(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
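
# Worked examples (hand-checked, base-26 digits with A = 1):
# "AB" -> 1 * 26 + 2 = 28 and "ZZ" -> 26 * 26 + 26 = 702.
assert excel_column_to_number("AB") == 28
assert excel_column_to_number("ZZ") == 702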
| 211 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : dict[int, int] = {}
UpperCAmelCase__ : Optional[int] = 2
while True:
UpperCAmelCase__ : Dict = factor_map.pop(__UpperCamelCase , __UpperCamelCase )
if factor:
UpperCAmelCase__ : Tuple = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase__ : int = factor
else:
UpperCAmelCase__ : Optional[Any] = prime
yield prime
prime += 1
def lowerCAmelCase ( __UpperCamelCase = 1e10 ):
'''simple docstring'''
UpperCAmelCase__ : Any = sieve()
UpperCAmelCase__ : Any = 1
while True:
UpperCAmelCase__ : List[Any] = next(__UpperCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__UpperCamelCase )
n += 2
if __name__ == "__main__":
print(solution())
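
# Background check (not used by the code above): for odd n,
# (p - 1)^n + (p + 1)^n is congruent to 2*n*p modulo p^2, because every other
# binomial term carries a factor of p^2. Sanity check with p = 5, n = 3:
assert (pow(4, 3) + pow(6, 3)) % 25 == (2 * 3 * 5) % 25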
| 65 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
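
# A minimal sketch of the trie-based longest-match splitting exercised above.
# Illustrative only: the real `transformers.tokenization_utils.Trie` also tracks
# overlapping partial matches; this greedy version reproduces the simple cases.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # end-of-word marker, same convention as the tests above

    def split(self, text):
        parts, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j  # longest match seen so far from position i
            if end is None:
                i += 1
            else:
                if start < i:
                    parts.append(text[start:i])
                parts.append(text[i:end])
                start = i = end
        if start < len(text):
            parts.append(text[start:])
        return parts


assert MiniTrie().split("ABC") == ["ABC"]  # nothing added -> one part, as in test_trie_split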
| 118 | 0 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
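
# Illustration (NumPy stand-in, shapes only) of the slice checked above: the last
# batch element's trailing 2x2 window in the middle two axes, first two entries of
# the final axis, flattened to the 8 values compared against `expected_slice`.
import numpy as np

_x = np.zeros((4, 4, 64, 64))
assert _x[-1, -2:, -2:, :2].flatten().shape == (8,)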
| 319 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
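
# The spatial sizes asserted in create_and_check_model follow the standard
# convolution output formula floor((size + 2*pad - kernel) / stride) + 1. Quick
# check for the first CvT stage configured above (kernel 7, stride 4, padding 2)
# on the 64-pixel test input:
assert floor((64 + 2 * 2 - 7) / 4) + 1 == 16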
| 319 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
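
# Sketch of what pad_across_processes is checked for above (pure-torch
# illustration, not the accelerate implementation): every rank's tensor is padded
# with zeros at the end (or at the front with pad_first=True) up to the largest
# dim-0 size across ranks. The helper name `pad_to` is hypothetical.
def pad_to(tensor, max_len, pad_first=False):
    padded = tensor.new_zeros((max_len,) + tensor.shape[1:])
    if pad_first:
        padded[max_len - tensor.shape[0] :] = tensor
    else:
        padded[: tensor.shape[0]] = tensor
    return padded


_t = torch.ones(3, 10)
assert pad_to(_t, 5).shape == (5, 10) and torch.all(pad_to(_t, 5)[3:] == 0)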
| 511 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value):
        """Encode an example (str path, bytes, NumPy array, PIL image or dict) into a format storable in Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value, token_per_repo_id=None):
        """Decode an encoded example back into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self):
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage):
        """Cast an Arrow array (string, binary, struct or list) to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage):
        """Embed image files into the Arrow array by reading the referenced paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image) -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image) -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> List[dict]:
    """Encode a list of objects (paths, arrays or PIL images) into a list of Image storage dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
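
# A standalone sketch of the itemsize-halving search performed in encode_np_array
# (same idea, simplified; the whitelist here is a toy stand-in for
# _VALID_IMAGE_ARRAY_DTPYES and the helper name `_toy_downcast` is hypothetical):
_TOY_SUPPORTED = {np.dtype("<i2"), np.dtype("<i4")}


def _toy_downcast(dtype: np.dtype) -> np.dtype:
    size = dtype.itemsize
    while size >= 1:
        candidate = np.dtype("<" + dtype.kind + str(size))
        if candidate in _TOY_SUPPORTED:
            return candidate
        size //= 2
    raise TypeError(f"no supported dtype for {dtype}")


# int64 ("<i8") is not supported, so halving the itemsize finds int32 ("<i4"):
assert _toy_downcast(np.dtype("<i8")) == np.dtype("<i4")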
| 511 | 1 |
'''simple docstring'''
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 427 |
"""Bellman-Ford shortest paths with negative-cycle detection."""
from __future__ import annotations


def print_distance(distance: list[float], src: int):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
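
# Example run (no input() required): a three-vertex graph with a negative edge but
# no negative cycle; distances from vertex 0 come out as [0.0, 2.0, 1.0].
_example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": -1},
    {"src": 0, "dst": 2, "weight": 4},
]
assert bellman_ford(_example_graph, 3, 3, 0) == [0.0, 2.0, 1.0]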
| 427 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
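
# Worked example of create_token_type_ids_from_sequences above: with hypothetical
# id lists ids_0 = [7, 8] and ids_1 = [9], "[CLS] ids_0 [SEP]" is segment 0 and
# "ids_1 [SEP]" is segment 1, giving [0, 0, 0, 0, 1, 1].
_ids_0, _ids_1 = [7, 8], [9]
assert [0] * (1 + len(_ids_0) + 1) + [1] * (len(_ids_1) + 1) == [0, 0, 0, 0, 1, 1]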
| 53 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
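
# Worked example of compute_intermediate_size for LLaMA-7B: n = 4096 with the
# default multiplier 1 and multiple_of 256 gives int(8 * 4096 / 3) = 10922,
# rounded up to the next multiple of 256 -> 11008.
assert compute_intermediate_size(4096) == INTERMEDIATE_SIZE_MAP["7B"] == 11008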
| 649 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
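
# Minimal sketch of the lazy-import pattern used above (illustrative only, not the
# transformers `_LazyModule` implementation; the class name is hypothetical):
# attribute access triggers the actual import.
import importlib


class _LazyNamespace:
    def __init__(self, name_to_module: dict):
        self._map = name_to_module

    def __getattr__(self, item):
        module = importlib.import_module(self._map[item])
        return getattr(module, item)


# usage: _LazyNamespace({"Path": "pathlib"}).Path("/tmp") imports pathlib on first access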
| 710 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_ ( a, a, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__UpperCamelCase = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase__ =VideoMAEModelTester(self )
UpperCAmelCase__ =ConfigTester(self, config_class=A_, has_text_modality=A_, hidden_size=37 )
def __UpperCAmelCase ( self, A_, A_, A_=False ) -> Optional[int]:
UpperCAmelCase__ =copy.deepcopy(A_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
UpperCAmelCase__ =torch.ones((self.model_tester.num_masks,) )
UpperCAmelCase__ =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
UpperCAmelCase__ =mask.expand(self.model_tester.batch_size, -1 ).bool()
UpperCAmelCase__ =bool_masked_pos.to(A_ )
if return_labels:
if model_class in [
*get_values(A_ ),
]:
UpperCAmelCase__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A_ )
return inputs_dict
def __UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_, nn.Linear ) )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
UpperCAmelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ =[*signature.parameters.keys()]
UpperCAmelCase__ =["pixel_values"]
self.assertListEqual(arg_names[:1], A_ )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
@slow
def __UpperCAmelCase ( self ) -> int:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ =VideoMAEModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __UpperCAmelCase ( self ) -> Tuple:
if not self.has_attentions:
pass
else:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =True
for model_class in self.all_model_classes:
UpperCAmelCase__ =self.model_tester.seq_length - self.model_tester.num_masks
UpperCAmelCase__ =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
UpperCAmelCase__ =True
UpperCAmelCase__ =False
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
UpperCAmelCase__ =len(A_ )
# Check attention is always last and order is fine
UpperCAmelCase__ =True
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
self.assertEqual(out_len + 1, len(A_ ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(A_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def __UpperCAmelCase ( self ) -> Dict:
def check_hidden_states_output(A_, A_, A_ ):
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(A_, A_ ) )
UpperCAmelCase__ =outputs.hidden_states
UpperCAmelCase__ =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A_ ), A_ )
UpperCAmelCase__ =self.model_tester.seq_length - self.model_tester.num_masks
UpperCAmelCase__ =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =True
check_hidden_states_output(A_, A_, A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ =True
check_hidden_states_output(A_, A_, A_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
UpperCAmelCase__ =np.load(A )
return list(A )
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Any:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
A_ )
UpperCAmelCase__ =self.default_image_processor
UpperCAmelCase__ =prepare_video()
UpperCAmelCase__ =image_processor(A_, return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ =model(**A_ )
# verify the logits
UpperCAmelCase__ =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, A_ )
UpperCAmelCase__ =torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A_, atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(A_ )
UpperCAmelCase__ =self.default_image_processor
UpperCAmelCase__ =prepare_video()
UpperCAmelCase__ =image_processor(A_, return_tensors="pt" ).to(A_ )
# add boolean mask, indicating which patches to mask
UpperCAmelCase__ =hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt" )
UpperCAmelCase__ =torch.load(A_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ =model(**A_ )
# verify the logits
UpperCAmelCase__ =torch.Size([1, 1408, 1536] )
UpperCAmelCase__ =torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]], device=A_ )
self.assertEqual(outputs.logits.shape, A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], A_, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
UpperCAmelCase__ =torch.tensor([0.51_42], device=A_ )
self.assertTrue(torch.allclose(outputs.loss, A_, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
UpperCAmelCase__ =VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=A_ ).to(
A_ )
with torch.no_grad():
UpperCAmelCase__ =model(**A_ )
        UpperCAmelCase__ =torch.tensor([0.64_69], device=A_ )
self.assertTrue(torch.allclose(outputs.loss, A_, atol=1E-4 ) )
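
# Illustrative sketch of the boolean-mask construction used by the pretraining tests
# above: a single per-video mask is built and repeated across the batch so that every
# example masks the same number of patches. Toy sizes assumed; kept as a comment since
# `torch` is only imported conditionally in this file:
#
#     mask = torch.ones((num_masks,))                                   # e.g. 22 ones
#     mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])  # pad to seq_length
#     bool_masked_pos = mask.expand(batch_size, -1).bool()              # (batch, seq_length)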
| 510 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__ = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__ = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
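# Note: keyword arguments passed to `AutoConfig.from_pretrained` override the loaded
# defaults, so the script above is roughly equivalent to (names here are illustrative):
#
#     config = AutoConfig.from_pretrained("gpt2-large", vocab_size=len(tokenizer),
#                                         scale_attn_by_inverse_layer_idx=True,
#                                         reorder_and_upcast_attn=True)
#     model = AutoModelForCausalLM.from_config(config)  # fresh random weights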
| 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 256 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
lowerCamelCase__ =[
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowerCamelCase__ =[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__lowerCAmelCase , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase__ =primes[:idx]
break
lowerCamelCase__ , lowerCamelCase__ =n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # a remaining odd component d,
    # i.e. solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase__ =False
for r in range(__lowerCAmelCase ):
lowerCamelCase__ =pow(__lowerCAmelCase , d * 2**r , __lowerCAmelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase__ =True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the loop above never found evidence of primality,
        # and n MUST be composite
return False
return True
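

def _decompose_example(n):
    """Illustrative helper (not used above): write n - 1 as d * 2**s with d odd.

    e.g. _decompose_example(561) == (35, 4), since 560 == 35 * 2**4.
    """
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s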
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 719 | """simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
lowerCamelCase__ =quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
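# Illustrative usage of the helper above (the dataset-repo URL shape shown reflects
# the current Hub layout; exact revision handling is delegated to `huggingface_hub`):
#
#     url = hf_hub_url("user/my-dataset", "data/train 01.csv")
#     # -> https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train%2001.csv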
| 132 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[Any] = XLMRobertaTokenizer
lowercase__ : Optional[Any] = XLMRobertaTokenizerFast
lowercase__ : Union[str, Any] = True
lowercase__ : List[str] = True
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
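        # `fairseq_offset` (1 for XLM-R) shifts raw SentencePiece ids so they line up
        # with the fairseq vocabulary, which reserves <s>=0, <pad>=1, </s>=2, <unk>=3
        # up front; the raw sp ids above (285, 46, ...) thus become 286, 47, ... here.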
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
            # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
            # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
lowerCAmelCase__ = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ )
lowerCAmelCase__ = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , ) | 90 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE( A ):
@staticmethod
@abstractmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError() | 498 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _snake_case ( _A ):
_A = 42
class _snake_case ( _A , _A ):
_A = True
@register_to_config
def __init__( self ,UpperCamelCase = 3 ,UpperCamelCase = 3 ,UpperCamelCase = ("DownEncoderBlock2D",) ,UpperCamelCase = ("UpDecoderBlock2D",) ,UpperCamelCase = (64,) ,UpperCamelCase = 1 ,UpperCamelCase = "silu" ,UpperCamelCase = 4 ,UpperCamelCase = 32 ,UpperCamelCase = 32 ,UpperCamelCase = 0.18215 ,) -> int:
super().__init__()
# pass init params to Encoder
snake_case__ :Union[str, Any] = Encoder(
in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,down_block_types=UpperCamelCase ,block_out_channels=UpperCamelCase ,layers_per_block=UpperCamelCase ,act_fn=UpperCamelCase ,norm_num_groups=UpperCamelCase ,double_z=UpperCamelCase ,)
# pass init params to Decoder
snake_case__ :int = Decoder(
in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,up_block_types=UpperCamelCase ,block_out_channels=UpperCamelCase ,layers_per_block=UpperCamelCase ,norm_num_groups=UpperCamelCase ,act_fn=UpperCamelCase ,)
        snake_case__ :Any = nn.Conv2d(2 * latent_channels ,2 * latent_channels ,1 )
        snake_case__ :List[Any] = nn.Conv2d(UpperCamelCase ,UpperCamelCase ,1 )
snake_case__ :List[str] = False
snake_case__ :List[str] = False
# only relevant if vae tiling is enabled
snake_case__ :str = self.config.sample_size
snake_case__ :Optional[Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size ,(list, tuple) )
else self.config.sample_size
)
snake_case__ :str = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
snake_case__ :int = 0.25
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=False ) -> str:
if isinstance(UpperCamelCase ,(Encoder, Decoder) ):
snake_case__ :List[str] = value
def lowerCAmelCase_ ( self ,UpperCamelCase = True ) -> List[str]:
snake_case__ :Any = use_tiling
def lowerCAmelCase_ ( self ) -> List[str]:
self.enable_tiling(UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = True
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self ) -> Dict[str, AttentionProcessor]:
snake_case__ :List[Any] = {}
def fn_recursive_add_processors(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
if hasattr(UpperCamelCase ,"set_processor" ):
snake_case__ :int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' ,UpperCamelCase ,UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
return processors
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :List[str] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase ,UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
if hasattr(UpperCamelCase ,"set_processor" ):
if not isinstance(UpperCamelCase ,UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' ,UpperCamelCase ,UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase ,return_dict=UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
snake_case__ :Optional[int] = [self.encoder(UpperCamelCase ) for x_slice in x.split(1 )]
snake_case__ :Union[str, Any] = torch.cat(UpperCamelCase )
else:
snake_case__ :Any = self.encoder(UpperCamelCase )
snake_case__ :str = self.quant_conv(UpperCamelCase )
snake_case__ :Dict = DiagonalGaussianDistribution(UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase ,return_dict=UpperCamelCase )
snake_case__ :Dict = self.post_quant_conv(UpperCamelCase )
snake_case__ :List[str] = self.decoder(UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
@apply_forward_hook
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
snake_case__ :str = [self._decode(UpperCamelCase ).sample for z_slice in z.split(1 )]
snake_case__ :Any = torch.cat(UpperCamelCase )
else:
snake_case__ :List[Any] = self._decode(UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :int = min(a.shape[2] ,b.shape[2] ,UpperCamelCase )
for y in range(UpperCamelCase ):
snake_case__ :str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :Union[str, Any] = min(a.shape[3] ,b.shape[3] ,UpperCamelCase )
for x in range(UpperCamelCase ):
snake_case__ :int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
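    # Blend sketch: with blend_extent = 4 the ramps above weight tile `b` by
    # y / 4 = 0, 0.25, 0.5, 0.75 across the overlap (and tile `a` by the complement),
    # linearly fading one decoded tile into the next to hide seams.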
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> AutoencoderKLOutput:
snake_case__ :int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
snake_case__ :Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
snake_case__ :Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
snake_case__ :List[Any] = []
for i in range(0 ,x.shape[2] ,UpperCamelCase ):
snake_case__ :List[str] = []
for j in range(0 ,x.shape[3] ,UpperCamelCase ):
snake_case__ :Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
snake_case__ :List[Any] = self.encoder(UpperCamelCase )
snake_case__ :Optional[Any] = self.quant_conv(UpperCamelCase )
row.append(UpperCamelCase )
rows.append(UpperCamelCase )
snake_case__ :Tuple = []
for i, row in enumerate(UpperCamelCase ):
snake_case__ :Tuple = []
for j, tile in enumerate(UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case__ :List[Any] = self.blend_v(rows[i - 1][j] ,UpperCamelCase ,UpperCamelCase )
if j > 0:
snake_case__ :List[str] = self.blend_h(row[j - 1] ,UpperCamelCase ,UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase ,dim=3 ) )
snake_case__ :Tuple = torch.cat(UpperCamelCase ,dim=2 )
snake_case__ :Dict = DiagonalGaussianDistribution(UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case__ :Tuple = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
snake_case__ :Any = int(self.tile_sample_min_size * self.tile_overlap_factor )
snake_case__ :List[str] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
snake_case__ :List[Any] = []
for i in range(0 ,z.shape[2] ,UpperCamelCase ):
snake_case__ :Tuple = []
for j in range(0 ,z.shape[3] ,UpperCamelCase ):
snake_case__ :Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
snake_case__ :Union[str, Any] = self.post_quant_conv(UpperCamelCase )
snake_case__ :Tuple = self.decoder(UpperCamelCase )
row.append(UpperCamelCase )
rows.append(UpperCamelCase )
snake_case__ :Optional[int] = []
for i, row in enumerate(UpperCamelCase ):
snake_case__ :Optional[int] = []
for j, tile in enumerate(UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case__ :List[str] = self.blend_v(rows[i - 1][j] ,UpperCamelCase ,UpperCamelCase )
if j > 0:
snake_case__ :str = self.blend_h(row[j - 1] ,UpperCamelCase ,UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase ,dim=3 ) )
snake_case__ :Union[str, Any] = torch.cat(UpperCamelCase ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = True ,UpperCamelCase = None ,) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case__ :Any = sample
snake_case__ :Optional[Any] = self.encode(UpperCamelCase ).latent_dist
if sample_posterior:
snake_case__ :Dict = posterior.sample(generator=UpperCamelCase )
else:
snake_case__ :int = posterior.mode()
snake_case__ :Optional[int] = self.decode(UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase ) | 57 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 57 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] , UpperCamelCase : list[int] , UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : list[list[int]] , ):
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase ) )
] # the reference grid
A__ = 1
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase ) )
] # the action grid
A__ = init[0]
A__ = init[1]
A__ = 0
    A__ = g + heuristic[x][y] # f = g + h: estimated total cost from start to goal through this cell
A__ = [[f, g, x, y]]
A__ = False # flag that is set when search is complete
    A__ = False # flag set if no cell can be expanded
while not found and not resign:
if len(UpperCamelCase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
        else: # choose the lowest-cost action so as to move closer to the goal
cell.sort()
cell.reverse()
A__ = cell.pop()
A__ = next_cell[2]
A__ = next_cell[3]
A__ = next_cell[1]
if x == goal[0] and y == goal[1]:
A__ = True
else:
for i in range(len(UpperCamelCase ) ): # to try out different valid actions
A__ = x + DIRECTIONS[i][0]
A__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A__ = g + cost
A__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A__ = 1
A__ = i
A__ = []
A__ = goal[0]
A__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A__ = x - DIRECTIONS[action[x][y]][0]
A__ = y - DIRECTIONS[action[x][y]][1]
A__ = xa
A__ = ya
invpath.append([x, y] )
A__ = []
for i in range(len(UpperCamelCase ) ):
path.append(invpath[len(UpperCamelCase ) - 1 - i] )
return path, action
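
# Note: this is A* with f = g + h, where h is the Manhattan distance built in the demo
# below; it is admissible for unit-cost 4-connected moves, though the extra +99 obstacle
# penalty trades the strict optimality guarantee for stronger obstacle avoidance.
# Minimal sketch of the heuristic on its own (grid shape and goal are illustrative):
#
#     heuristic = [
#         [abs(i - goal[0]) + abs(j - goal[1]) for j in range(cols)]
#         for i in range(rows)
#     ]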
if __name__ == "__main__":
lowerCamelCase__ = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCamelCase__ = [0, 0]
# all coordinates are given in format [y,x]
lowerCamelCase__ = [len(grid) - 1, len(grid[0]) - 1]
lowerCamelCase__ = 1
# the cost map which pushes the path closer to the goal
lowerCamelCase__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCamelCase__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCamelCase__ = 99
lowerCamelCase__ , lowerCamelCase__ = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 574 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
__a : List[Any] = quote(_lowerCamelCase )
return hfh.hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" , revision=_lowerCamelCase )
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = 0
__a : Optional[Any] = [0]
__a : int = [0]
__a : str = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
__a : int = [60]
__a : Union[str, Any] = [10]
__a : Tuple = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = 3
__a : str = [1, 2, 3]
__a : Optional[Any] = [3, 2, 1]
__a : int = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = 50
__a : Tuple = [60, 100, 120]
__a : List[str] = [10, 20, 30]
__a : Union[str, Any] = len(_lowercase )
self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )
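

# Reference sketch of the 0/1-knapsack routine exercised above. This is an *assumed*
# implementation of `knapsack.knapsack(capacity, weights, values, counter)` (the real
# module under test may differ), shown only to make the expected results concrete,
# e.g. capacity=50, weights=[10, 20, 30], values=[60, 100, 120] -> 220.
def _knapsack_sketch(capacity, weights, values, counter):
    # classic bottom-up dynamic programme over (items considered) x (remaining capacity)
    dp = [[0] * (capacity + 1) for _ in range(counter + 1)]
    for i in range(1, counter + 1):
        for cap in range(capacity + 1):
            dp[i][cap] = dp[i - 1][cap]  # option 1: skip item i-1
            if weights[i - 1] <= cap:  # option 2: take item i-1 if it fits
                dp[i][cap] = max(dp[i][cap], dp[i - 1][cap - weights[i - 1]] + values[i - 1])
    return dp[counter][capacity]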
if __name__ == "__main__":
unittest.main()
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a: Optional[int] = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Any = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__a: Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 108 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : Optional[int] = True
def __A ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def __A ( self , **lowerCAmelCase__ ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __A ( self , **lowerCAmelCase__ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ ) -> Dict:
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = 'lower newer'
return input_text, output_text
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
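    # Note on the toy vocab above: byte-level BPE (as in GPT-2/RoBERTa/Longformer) maps
    # a leading space to the visible marker "\u0120" ("Ġ"), so " newer" tokenizes to
    # "\u0120newer" rather than a bare "newer"; the merges file mirrors that convention.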
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = 'Encode this sequence.'
SCREAMING_SNAKE_CASE = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = encoded.index(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = encoded.index(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> Dict:
pass
def __A ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
            # attention_mask should put 1 everywhere, so its sum divided by the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __A ( self ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase__ )
self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase__ )
self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase__ )
def __A ( self ) -> Dict:
# Verify that the returned offsets adapt correctly to the `add_prefix_space` and
# `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE = F'{text_of_1_token} {text_of_1_token}'
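# Each block below re-creates the tokenizer with a different (add_prefix_space, trim_offsets)
# combination and checks that the returned offset mapping matches.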
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 247 | 0 |
from ....utils import logging
__snake_case :Any = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=2_048):
'''simple docstring'''
__a = config.__dict__
__a = modal_hidden_size
if num_labels:
__a = num_labels
| 60 |
__snake_case :str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Return True if the sink t is reachable from the source s in the residual graph (BFS).
__a = [False] * len(_UpperCAmelCase )
__a = [s]
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCAmelCase )
__a = True
__a = u
return visited[t]
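# Ford-Fulkerson with BFS (Edmonds-Karp): repeatedly augment along shortest residual paths
# until the sink is unreachable, then report the saturated edges of the final flow.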
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [-1] * (len(_UpperCAmelCase ))
__a = 0
__a = []
__a = [i[:] for i in graph] # Keep a copy of the original capacities so saturated edges can be found later.
while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = float('''Inf''' )
__a = sink
while s != source:
# Find the minimum residual capacity along the augmenting path
__a = min(_UpperCAmelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
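# For the capacity matrix above the maximum flow is 23, and this should print the
# saturated edges [(1, 3), (4, 3), (4, 5)], which here form a minimum cut.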
| 60 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
A__: Union[str, Any] = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , *SCREAMING_SNAKE_CASE :int , **SCREAMING_SNAKE_CASE :str ) -> None:
'''simple docstring'''
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
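# Example invocation (file names below are placeholders, not from the source):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan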
| 694 | 1 |
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
@wraps(lowerCAmelCase__ )
def _inner_fn(*lowerCAmelCase__ , **lowerCAmelCase__ ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowerCAmelCase__ , )
return fn(*lowerCAmelCase__ , **lowerCAmelCase__ )
return _inner_fn
| 587 |
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
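# Quick sanity check with illustrative values (not from the source): water has a bulk
# modulus of roughly 2.15e9 Pa and a density of roughly 1000 kg/m^3, so its speed of
# sound comes out to sqrt(2.15e9 / 1000), about 1466 m/s.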
| 587 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = [[1, 2, 4], [1, 2, 3, 4]]
__a : Optional[Any] = DisjunctiveConstraint(_lowercase )
self.assertTrue(isinstance(dc.token_ids , _lowercase ) )
with self.assertRaises(_lowercase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowercase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowercase ):
DisjunctiveConstraint(_lowercase ) # fails here
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = [[1, 2, 3], [1, 2, 4]]
__a : Any = DisjunctiveConstraint(_lowercase )
__a , __a , __a : Dict = dc.update(1 )
__a : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(_lowercase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__a , __a , __a : List[Any] = dc.update(2 )
__a : int = stepped is True and completed is False and reset is False
self.assertTrue(_lowercase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__a , __a , __a : List[str] = dc.update(3 )
__a : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(_lowercase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__a : Optional[int] = DisjunctiveConstraint(_lowercase )
__a , __a , __a : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__a , __a , __a : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__a , __a , __a : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__a , __a , __a : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__a , __a , __a : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__a , __a , __a : Tuple = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__a , __a , __a : List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 581 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any]=False ):
__a : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__a : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Tuple=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__a : str = """"""
else:
__a : Tuple = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a : Tuple = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__a : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__a : List[Any] = in_proj_weight[
: config.hidden_size, :
]
__a : Any = in_proj_bias[: config.hidden_size]
__a : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
__a : Optional[int] = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str ):
__a : Any = dct.pop(_lowerCamelCase )
__a : List[Any] = val
def __magic_name__ ( ):
__a : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__a : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ):
__a : Optional[int] = DeiTConfig()
# all deit models have fine-tuned heads
__a : List[Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__a : Any = 1_0_0_0
__a : Tuple = """huggingface/label-files"""
__a : int = """imagenet-1k-id2label.json"""
__a : int = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__a : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__a : Any = idalabel
__a : Tuple = {v: k for k, v in idalabel.items()}
__a : int = int(deit_name[-6:-4] )
__a : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
__a : str = 1_9_2
__a : Union[str, Any] = 7_6_8
__a : Any = 1_2
__a : Optional[int] = 3
elif deit_name[9:].startswith("""small""" ):
__a : Union[str, Any] = 3_8_4
__a : int = 1_5_3_6
__a : Tuple = 1_2
__a : Dict = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
__a : Optional[Any] = 1_0_2_4
__a : Optional[Any] = 4_0_9_6
__a : Optional[int] = 2_4
__a : List[Any] = 1_6
# load original model from timm
__a : List[Any] = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__a : List[str] = timm_model.state_dict()
__a : Union[str, Any] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
__a : Union[str, Any] = DeiTForImageClassificationWithTeacher(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
__a : Optional[int] = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__a : Dict = DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size )
__a : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__a : Tuple = encoding["""pixel_values"""]
__a : int = model(_lowerCamelCase )
__a : Dict = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
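# Example invocation (output path is a placeholder, not from the source):
# python convert_deit_checkpoint.py --deit_name vit_deit_base_distilled_patch16_224 \
#     --pytorch_dump_folder_path ./deit-base-distilled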
| 581 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case:
def __init__( self , A_ , A_=13 , A_=32 , A_=3 , A_=4 , A_=[10, 20, 30, 40] , A_=[2, 2, 3, 2] , A_=True , A_=True , A_=37 , A_="gelu" , A_=10 , A_=0.02 , A_=["stage2", "stage3", "stage4"] , A_=3 , A_=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = num_stages
_SCREAMING_SNAKE_CASE = hidden_sizes
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = out_features
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = num_stages
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def A ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def A ( self ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=A_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=A_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def A ( self , A_ , A_ , A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = UperNetForSemanticSegmentation(config=A_ )
model.to(A_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(A_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case( __A , __A , unittest.TestCase ):
_A = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_A = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
_A = False
_A = False
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = UperNetModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def A ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self ):
'''simple docstring'''
return
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A_ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def A ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self ):
'''simple docstring'''
pass
def A ( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
_SCREAMING_SNAKE_CASE = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A_ , A_ ) )
_SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A_ , A_ , A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = _config_zero_init(A_ )
_SCREAMING_SNAKE_CASE = _config_zero_init(configs_no_init.backbone_config )
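# With every initializer range zeroed out, each trainable parameter must come out as exactly 0.0 or 1.0.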
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=A_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def A ( self ):
'''simple docstring'''
pass
@slow
def A ( self ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = UperNetForSemanticSegmentation.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A__ ( ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
_SCREAMING_SNAKE_CASE = Image.open(UpperCamelCase__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
_SCREAMING_SNAKE_CASE = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(A_ )
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = processor(images=A_ , return_tensors='''pt''' ).to(A_ )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**A_ )
_SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , A_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
_SCREAMING_SNAKE_CASE = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(A_ )
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = processor(images=A_ , return_tensors='''pt''' ).to(A_ )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**A_ )
_SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , A_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , A_ , atol=1e-4 ) )
| 710 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase__ = "laptop" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = F'''https://www.amazon.in/laptop/s?k={product}'''
_SCREAMING_SNAKE_CASE = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(UpperCamelCase__ , headers=UpperCamelCase__ ).text )
# Initialize a Pandas dataframe with the column titles
_SCREAMING_SNAKE_CASE = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_SCREAMING_SNAKE_CASE = item.ha.text
_SCREAMING_SNAKE_CASE = '''https://www.amazon.in/''' + item.ha.a['''href''']
_SCREAMING_SNAKE_CASE = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_SCREAMING_SNAKE_CASE = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_SCREAMING_SNAKE_CASE = '''Not available'''
try:
_SCREAMING_SNAKE_CASE = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_SCREAMING_SNAKE_CASE = ''''''
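# Discount percentage: (MRP - current price) / MRP * 100.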
try:
_SCREAMING_SNAKE_CASE = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
_SCREAMING_SNAKE_CASE = float('''nan''' )
except AttributeError:
pass
_SCREAMING_SNAKE_CASE = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_SCREAMING_SNAKE_CASE = ''' '''
_SCREAMING_SNAKE_CASE = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase : str = """headphones"""
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 168 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : str = KandinskyVaaImgaImgPipeline
snake_case__ : Union[str, Any] = ["image_embeds", "negative_image_embeds", "image"]
snake_case__ : Tuple = [
"image_embeds",
"negative_image_embeds",
"image",
]
snake_case__ : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ : Optional[int] = False
@property
def _UpperCamelCase ( self ) -> int:
return 32
@property
def _UpperCamelCase ( self ) -> Tuple:
return 32
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self ) -> Any:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
return 100
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**lowercase__ )
return model
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(**lowercase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _UpperCamelCase ( self , lowercase__ , lowercase__=0 ) -> str:
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase__ )
# create init_image
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(np.uinta(lowercase__ ) ).convert('RGB' ).resize((256, 256) )
if str(lowercase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(lowercase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**lowercase__ )
SCREAMING_SNAKE_CASE : int = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(lowercase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
SCREAMING_SNAKE_CASE : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
SCREAMING_SNAKE_CASE : Optional[int] = 'A red cartoon frog, 4k'
SCREAMING_SNAKE_CASE : Dict = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Tuple = pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
SCREAMING_SNAKE_CASE : str = pipeline(
image=lowercase__ , image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
| 251 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case__ : Tuple = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
snake_case__ : Optional[int] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> Any:
SCREAMING_SNAKE_CASE : List[Any] = AudioClassificationPipeline(model=lowercase__ , feature_extractor=lowercase__ )
# test with a raw waveform
SCREAMING_SNAKE_CASE : Optional[int] = np.zeros((34_000,) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros((14_000,) )
return audio_classifier, [audioa, audio]
def _UpperCamelCase ( self , lowercase__ , lowercase__ ) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = examples
SCREAMING_SNAKE_CASE : Optional[Any] = audio_classifier(lowercase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowercase__ , [
{'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
{'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
] , )
SCREAMING_SNAKE_CASE : Optional[Any] = audio_classifier(lowercase__ , top_k=1 )
self.assertEqual(
lowercase__ , [
{'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
] , )
self.run_torchaudio(lowercase__ )
@require_torchaudio
def _UpperCamelCase ( self , lowercase__ ) -> Dict:
import datasets
# test with an audio array loaded from a dataset
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
SCREAMING_SNAKE_CASE : int = dataset[0]['audio']['array']
SCREAMING_SNAKE_CASE : List[str] = audio_classifier(lowercase__ )
self.assertEqual(
lowercase__ , [
{'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
{'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
] , )
@require_torch
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : List[Any] = 'anton-l/wav2vec2-random-tiny-classifier'
SCREAMING_SNAKE_CASE : Tuple = pipeline('audio-classification' , model=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = np.ones((8_000,) )
SCREAMING_SNAKE_CASE : Tuple = audio_classifier(lowercase__ , top_k=4 )
SCREAMING_SNAKE_CASE : Dict = [
{'score': 0.0_8_4_2, 'label': 'no'},
{'score': 0.0_8_3_8, 'label': 'up'},
{'score': 0.0_8_3_7, 'label': 'go'},
{'score': 0.0_8_3_4, 'label': 'right'},
]
SCREAMING_SNAKE_CASE : Tuple = [
{'score': 0.0_8_4_5, 'label': 'stop'},
{'score': 0.0_8_4_4, 'label': 'on'},
{'score': 0.0_8_4_1, 'label': 'right'},
{'score': 0.0_8_3_4, 'label': 'left'},
]
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE : Tuple = {'array': np.ones((8_000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE : List[Any] = audio_classifier(lowercase__ , top_k=4 )
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
import datasets
SCREAMING_SNAKE_CASE : List[str] = 'superb/wav2vec2-base-superb-ks'
SCREAMING_SNAKE_CASE : Optional[int] = pipeline('audio-classification' , model=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
SCREAMING_SNAKE_CASE : Tuple = np.array(dataset[3]['speech'] , dtype=np.floataa )
SCREAMING_SNAKE_CASE : str = audio_classifier(lowercase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=3 ) , [
{'score': 0.9_8_1, 'label': 'go'},
{'score': 0.0_0_7, 'label': 'up'},
{'score': 0.0_0_6, 'label': '_unknown_'},
{'score': 0.0_0_1, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF' )
def _UpperCamelCase ( self ) -> str:
pass
| 251 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( enum.Enum):
__lowercase : str = 0
__lowercase : List[Any] = 1
__lowercase : Tuple = 2
@add_end_docstrings(_snake_case)
class _UpperCAmelCase ( _snake_case):
__lowercase : int = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *snake_case_ , **snake_case_ ):
super().__init__(*snake_case_ , **snake_case_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case : int = None
if self.model.config.prefix is not None:
_snake_case : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case : str = self._sanitize_parameters(prefix=snake_case_ , **self._forward_params )
_snake_case : str = {**self._preprocess_params, **preprocess_params}
_snake_case : Optional[int] = {**self._forward_params, **forward_params}
def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ , ):
_snake_case : Optional[int] = {}
if prefix is not None:
_snake_case : List[str] = prefix
if prefix:
_snake_case : Dict = self.tokenizer(
snake_case_ , padding=snake_case_ , add_special_tokens=snake_case_ , return_tensors=self.framework )
_snake_case : Dict = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
" [None, 'hole']" )
_snake_case : Any = handle_long_generation
preprocess_params.update(snake_case_ )
_snake_case : int = generate_kwargs
_snake_case : Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
_snake_case : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
_snake_case : str = ReturnType.TENSORS
if return_type is not None:
_snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
_snake_case : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case : List[str] = self.tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
if len(snake_case_ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
_snake_case : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*snake_case_ , **snake_case_ )
def __call__( self , snake_case_ , **snake_case_ ):
return super().__call__(snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_="" , snake_case_=None , **snake_case_ ):
_snake_case : Dict = self.tokenizer(
prefix + prompt_text , padding=snake_case_ , add_special_tokens=snake_case_ , return_tensors=self.framework )
_snake_case : Any = prompt_text
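# "hole" trims the prompt from the left so that prompt + requested new tokens still fit
# within the model's maximum length.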
if handle_long_generation == "hole":
_snake_case : Dict = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case : Union[str, Any] = generate_kwargs["max_new_tokens"]
else:
_snake_case : int = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
_snake_case : Optional[Any] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case : int = inputs["attention_mask"][:, -keep_length:]
return inputs
def lowerCamelCase__ ( self , snake_case_ , **snake_case_ ):
_snake_case : str = model_inputs["input_ids"]
_snake_case : Optional[Any] = model_inputs.get("attention_mask" , snake_case_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case : List[Any] = None
_snake_case : str = None
_snake_case : Tuple = 1
else:
_snake_case : Optional[Any] = input_ids.shape[0]
_snake_case : Optional[int] = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case : Union[str, Any] = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
_snake_case : Any = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case : Optional[int] = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case : int = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case : Optional[int] = self.model.generate(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ )
_snake_case : int = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case : List[Any] = generated_sequence.reshape(snake_case_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case : Union[str, Any] = tf.reshape(snake_case_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCamelCase__ ( self , snake_case_ , snake_case_=ReturnType.FULL_TEXT , snake_case_=True ):
_snake_case : str = model_outputs["generated_sequence"][0]
_snake_case : int = model_outputs["input_ids"]
_snake_case : Optional[int] = model_outputs["prompt_text"]
_snake_case : List[Any] = generated_sequence.numpy().tolist()
_snake_case : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case : int = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case : int = self.tokenizer.decode(
snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , )
# Remove the padding prompt from the sequence when an XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case : Dict = 0
else:
_snake_case : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case : str = prompt_text + text[prompt_length:]
else:
_snake_case : Tuple = text[prompt_length:]
_snake_case : List[Any] = {"generated_text": all_text}
records.append(snake_case_ )
return records
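# Minimal usage sketch (model name is an example, not part of the source):
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# generator("Once upon a time", max_new_tokens=20)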
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def a__ ( a : float , a : float , a : bool = False ):
"""simple docstring"""
if radian_mode:
return [magnitude * cos(a ), magnitude * sin(a )]
return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )]
def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ):
"""simple docstring"""
_snake_case : NDArray[floataa] = cross(a , a )
_snake_case : float = sum(a )
return abs(a ) < eps
if __name__ == "__main__":
# Test to check if it works
_a : Tuple = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
_a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
_a : List[Any] = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
_a : List[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
_a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
_a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 87 | 1 |
_snake_case = 65521
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = 1
_A : Tuple = 0
for plain_chr in plain_text:
_A : Union[str, Any] = (a + ord(_lowercase )) % MOD_ADLER
_A : Dict = (b + a) % MOD_ADLER
return (b << 16) | a
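# Sanity check: the loop above is the Adler-32 checksum, so it should agree with the
# standard library, e.g. zlib.adler32(b"Wikipedia") == 300286872.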
| 307 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = OmegaConf.load(_lowercase )
SCREAMING_SNAKE_CASE : int = torch.load(_lowercase , map_location='''cpu''' )['''model''']
SCREAMING_SNAKE_CASE : int = list(state_dict.keys() )
# extract state_dict for VQVAE
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : List[str] = '''first_stage_model.'''
for key in keys:
if key.startswith(_lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = state_dict[key]
# extract state_dict for UNetLDM
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : Any = '''model.diffusion_model.'''
for key in keys:
if key.startswith(_lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = state_dict[key]
SCREAMING_SNAKE_CASE : int = config.model.params.first_stage_config.params
SCREAMING_SNAKE_CASE : Tuple = config.model.params.unet_config.params
SCREAMING_SNAKE_CASE : Union[str, Any] = VQModel(**_lowercase ).eval()
vqvae.load_state_dict(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = UNetLDMModel(**_lowercase ).eval()
unet.load_state_dict(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_lowercase , )
SCREAMING_SNAKE_CASE : Optional[Any] = LDMPipeline(_lowercase , _lowercase , _lowercase )
pipeline.save_pretrained(_lowercase )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
__UpperCamelCase : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
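
# Example invocation (a sketch; the paths below are placeholders, not files that
# ship with this script):
#   python convert_ldm_original.py \
#       --checkpoint_path ldm.ckpt --config_path ldm_config.yaml --output_path ./ldm_pipeline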
| 248 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # force an odd kernel size so the window has a well-defined center pixel
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
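
# Minimal self-check, a sketch assuming the helpers above: a constant image is a
# fixed point of the bilateral filter away from the untouched border pixels.
#
#   flat = np.ones((10, 10), dtype="float32")
#   smoothed = bilateral_filter(flat, 1.0, 1.0, 5)
#   assert np.allclose(smoothed[2:-2, 2:-2], 1.0)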
| 346 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 57 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Export a PyTorch BertModel as a TensorFlow 1.x checkpoint.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
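
# Example invocation (a sketch; the paths below are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint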
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 633 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 466 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"""{metric_key_prefix}_"""):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, """predict""")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"""{metric_key_prefix}_"""):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
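
# Typical wiring, a sketch (the post-processing helper that maps start/end logits
# back to answer spans is a placeholder name, not part of this module):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()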
| 377 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''yjernite/retribert-base-uncased''': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer for RetriBERT, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
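
# Usage sketch (assumes network access to the hub checkpoint referenced above):
#
#   tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   enc = tok("how are you?", "fine, thanks")
#   # enc["token_type_ids"] marks the second segment with 1s, as produced by
#   # create_token_type_ids_from_sequences above.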
| 482 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected unweighted graph for running the Markov chain algorithm."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
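
# Example run, a sketch: a two-state chain that strongly prefers staying in "a".
# In the stationary distribution, "a" is visited about five times as often as "b".
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   counts = get_transitions("a", transitions, 5000)
#   # counts["a"] is expected to be several times counts["b"]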
| 482 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Return the Möbius function value μ(n) for a positive integer n."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
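
# A few hand-checked values (a sketch relying on the helpers imported above):
#   mobius(7)  == -1   # one prime factor (odd count)
#   mobius(15) ==  1   # 15 = 3 * 5, two prime factors (even count)
#   mobius(24) ==  0   # 24 = 2**3 * 3 is not square-free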
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter operating on floating point samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                F'Expected a_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(a_coeffs)}'
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                F'Expected b_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(b_coeffs)}'
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
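
# Worked example, a sketch: a first-order moving-average smoother
# y[n] = 0.5 * x[n] + 0.5 * x[n-1].
#
#   filt = IIRFilter(1)
#   filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
#   filt.process(1.0)  # -> 0.5 (history is still empty)
#   filt.process(1.0)  # -> 1.0 (previous input now contributes)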
| 639 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the original checkpoint into the transformers RobertaPreLayerNorm layout.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["""RobertaPreLayerNormForMaskedLM"""] )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="""pytorch_model.bin""" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("""roberta.""" ):
            tensor_key = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 686 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                } ), codebase_urls=["""https://github.com/jitsi/jiwer/"""], reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ], )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 686 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(F'{orig_block_num}.{layer_num}.{original_name}', F'block.{new_block_num}.{layer_num}.{new_name}')
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, F'patch_embeddings.{total_embed_found}.')
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original weights into the transformers PoolFormer structure.
    """
    # define default PoolFormer configuration
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(F'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3])
    else:
        raise ValueError(F'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 1 |
def mean_absolute_deviation(nums: list[int]) -> float:
    '''Return the mean absolute deviation of a list of numbers.'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
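
# Hand-checked example (a sketch using the function above):
#   mean_absolute_deviation([1, 2, 3, 4]) == 1.0
#   # the average is 2.5; deviations are 1.5, 0.5, 0.5, 1.5, which sum to 4.0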
| 235 |
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCamelCase ( self : str , a_ : Optional[int] , a_ : str , a_ : Tuple ):
"""simple docstring"""
lowerCamelCase__ = TextaTextGenerationPipeline(model=a_ , tokenizer=a_ )
return generator, ["Something to write", "Something else"]
def _UpperCamelCase ( self : Tuple , a_ : int , a_ : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = generator("""Something there""" )
self.assertEqual(a_ , [{"""generated_text""": ANY(a_ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
lowerCamelCase__ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=a_ )
self.assertEqual(
a_ , [
[{"""generated_text""": ANY(a_ )}, {"""generated_text""": ANY(a_ )}],
[{"""generated_text""": ANY(a_ )}, {"""generated_text""": ANY(a_ )}],
] , )
lowerCamelCase__ = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=a_ )
self.assertEqual(
a_ , [
[{"""generated_text""": ANY(a_ )}, {"""generated_text""": ANY(a_ )}],
[{"""generated_text""": ANY(a_ )}, {"""generated_text""": ANY(a_ )}],
] , )
with self.assertRaises(a_ ):
generator(4 )
@require_torch
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
outputs = generator("""Something there""" , do_sample=False )
self.assertEqual(outputs , [{"""generated_text""": """"""}] )
num_return_sequences = 3
outputs = generator(
    """Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
target_outputs = [
    {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
    {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
    {"""generated_text""": """"""},
]
self.assertEqual(outputs , target_outputs )
outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
self.assertEqual(
    outputs , [
        {"""generated_token_ids""": ANY(torch.Tensor )},
        {"""generated_token_ids""": ANY(torch.Tensor )},
    ] , )
generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
generator.tokenizer.pad_token = """<pad>"""
outputs = generator(
    ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
self.assertEqual(
    outputs , [
        [
            {"""generated_token_ids""": ANY(torch.Tensor )},
            {"""generated_token_ids""": ANY(torch.Tensor )},
        ],
        [
            {"""generated_token_ids""": ANY(torch.Tensor )},
            {"""generated_token_ids""": ANY(torch.Tensor )},
        ],
    ] , )
@require_tf
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
outputs = generator("""Something there""" , do_sample=False )
self.assertEqual(outputs , [{"""generated_text""": """"""}] )
| 235 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
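# Hedged note: this script assumes the target page exposes an Open Graph image
# tag such as <meta property="og:image" content="https://example.com/pic.jpg">
# (URL illustrative); pages without that tag make soup.find() return None and
# the indexing above raises a TypeError.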
| 532 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
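# Hedged usage sketch of the lazy-module pattern above: the package import is
# cheap, and the torch-backed symbols are only materialized on first attribute
# access, e.g.
# from transformers.models.roformer import RoFormerConfig   # no torch import yet
# from transformers.models.roformer import RoFormerModel    # triggers the lazy load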
| 532 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve (matrix : Matrix , vector : Matrix ) -> Matrix:
    size : int = len(matrix )
    augmented : Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row : int
    col : int
    pivot_row : int
    rowa : int
    cola : int
    ratio : float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate (y_points : list[int] ) -> Callable[[int], int]:
    size : int = len(y_points )
    matrix : Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector : Matrix = [[0] for _ in range(size )]
    coeffs : Matrix
    x_val : int
    y_val : int
    col : int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var : int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function (variable : int ) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution (func : Callable[[int], int] = question_function , order : int = 10 ) -> int:
    data_points : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret : int = 0
    poly : Callable[[int], int]
    x_val : int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
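# Hedged worked example (from the Project Euler 101 statement, not this file):
# for the cubic u(n) = n**3 the first incorrect terms of the optimum polynomials
# are FIT(1) = 1, FIT(2) = 15, FIT(3) = 58, summing to 74; solution() computes
# the analogous sum of FITs for the degree-10 generating function above.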
if __name__ == "__main__":
print(f'{solution() = }') | 549 |
"""simple docstring"""
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
lowerCamelCase__ = """hopper-medium-v2"""
lowerCamelCase__ = gym.make(env_name)
lowerCamelCase__ = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 1_000
rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
next_observation , reward , terminal , _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}') | 549 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module ( lowerCAmelCase_ ) -> Optional[Any]:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(lowerCAmelCase_ , '''_dynamo''' ):
return False
return isinstance(lowerCAmelCase_ , torch._dynamo.eval_frame.OptimizedModule )
def snake_case ( model , keep_fpaa_wrapper = True ) -> Dict:
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , '''forward''' )
        original_forward = model.__dict__.pop('''_original_forward''' , None )
        if original_forward is not None:
            while hasattr(forward , '''__wrapped__''' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '''_converted_to_transformer_engine''' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
return model
def snake_case ( ) -> str:
PartialState().wait_for_everyone()
def snake_case ( obj , f ) -> Dict:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def snake_case ( **lowerCAmelCase_ ) -> str:
for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def snake_case ( lowerCAmelCase_ ) -> Tuple:
if not hasattr(lowerCAmelCase_ , '''__qualname__''' ) and not hasattr(lowerCAmelCase_ , '''__name__''' ):
        lowerCAmelCase_ = getattr(lowerCAmelCase_ , '''__class__''' , lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , '''__qualname__''' ):
return obj.__qualname__
if hasattr(lowerCAmelCase_ , '''__name__''' ):
return obj.__name__
return str(lowerCAmelCase_ )
def merge_dicts ( source , destination ) -> Tuple:
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
return destination
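# Hedged example (illustrative dicts): nested keys merge recursively while
# scalar keys from `source` overwrite those in `destination`, so
# merge_dicts({"a": {"b": 1}, "x": 1}, {"a": {"c": 2}, "x": 2})
# is expected to return {"a": {"c": 2, "b": 1}, "x": 1}.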
def snake_case ( port = None ) -> bool:
    if port is None:
        port = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 103 |
"""simple docstring"""
from math import sqrt
def snake_case ( limit : int = 1000000 ) -> int:
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
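# Hedged check (well-known Project Euler 86 result, stated from memory rather
# than from this file): for limit = 1_000_000 the returned least M should be 1818.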
if __name__ == "__main__":
print(F"{solution() = }")
| 103 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get ( k : Optional[Any] ):
    '''simple docstring'''
    return getitem, k
def _set ( k : Any , v : Optional[int] ):
    '''simple docstring'''
    return setitem, k, v
def _del ( k : List[str] ):
    '''simple docstring'''
    return delitem, k
def _run_operation ( obj : str , fun : int , *args : Tuple ):
    '''simple docstring'''
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
__UpperCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase_ ( operations : str ):
    '''simple docstring'''
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase_ ( ):
    '''simple docstring'''
    def is_public(name : str ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names | 692 |
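# Hedged usage sketch of the HashMap under test (API inferred from the
# operation tuples above, which drive it through getitem/setitem/delitem):
# hm = HashMap(initial_block_size=4)
# hm["key_a"] = "val_a"
# assert hm["key_a"] == "val_a"
# del hm["key_a"]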
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation ( graph : dict , v : str , visited_forward : set , visited_backward : set , cst_fwd : dict , cst_bwd : dict , queue : PriorityQueue , parent : dict , shortest_distance : float | int , ):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij ( source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 692 | 1 |
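    # Hedged example on the module-level graphs above: the cheapest E -> F route
    # in graph_fwd is E -> G -> F with cost 2 + 1 = 3, so
    # bidirectional_dij("E", "F", graph_fwd, graph_bwd) is expected to return 3.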
'''simple docstring'''
import os
import sys
import unittest
UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase_ = os.path.join(git_repo_path, "src", "transformers")
UpperCamelCase_ = "\n{0} = None\n"
UpperCamelCase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
UpperCamelCase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(A )
SCREAMING_SNAKE_CASE : List[str] = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(A, 'tokenizers' )
SCREAMING_SNAKE_CASE : Optional[int] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(A, 'tensorflow_text' )
SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(A, 'sentencepiece_and_tokenizers' )
SCREAMING_SNAKE_CASE : Dict = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(A, 'sentencepiece_and_tensorflow_text' )
SCREAMING_SNAKE_CASE : List[Any] = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(A, 'sentencepiece_and_tokenizers_and_vision' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch', A )
self.assertIn('tensorflow_text', A )
self.assertIn('sentencepiece_and_tokenizers', A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel', objects['torch'] )
self.assertIn('TFBertModel', objects['tf'] )
self.assertIn('FlaxBertModel', objects['flax'] )
self.assertIn('BertModel', objects['torch'] )
self.assertIn('TFBertTokenizer', objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('CONSTANT', '\'torch\'' )
self.assertEqual(A, '\nCONSTANT = None\n' )
SCREAMING_SNAKE_CASE : str = create_dummy_object('function', '\'torch\'' )
self.assertEqual(
A, '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
SCREAMING_SNAKE_CASE : str = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
SCREAMING_SNAKE_CASE : Optional[int] = create_dummy_object('FakeClass', '\'torch\'' )
self.assertEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
SCREAMING_SNAKE_CASE : Dict = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'], A )
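# Hedged note on find_backend, inferred from the assertions above rather than
# from its implementation: it maps a guard line such as
# "    if not is_tokenizers_available():" to the backend string "tokenizers",
# joins compound guards with "_and_", and returns None for non-guard lines.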
| 28 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
| 28 | 1 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
UpperCamelCase_ = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
UpperCamelCase_ = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def lowerCAmelCase__ ( images : int ) -> Optional[Any]:
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil ( images : Dict ) -> Optional[Any]:
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_5_5).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
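# Hedged usage sketch (array shape is illustrative): a float array in [0, 1]
# of shape (batch, height, width, channels) becomes a list of PIL images.
# import numpy as np
# imgs = numpy_to_pil(np.random.rand(2, 64, 64, 3))
# assert len(imgs) == 2 and imgs[0].size == (64, 64)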
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode ( data ) -> bytes:
    """simple docstring"""
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = """""".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"""=""" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""""""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode ( encoded_data ) -> bytes:
    """simple docstring"""
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(data )
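# Hedged round-trip sketch: the two helpers above are expected to agree with
# the standard library, e.g.
# import base64
# assert base64_encode(b"Hello") == base64.b64encode(b"Hello") == b"SGVsbG8="
# assert base64_decode(b"SGVsbG8=") == b"Hello"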
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470 |
"""simple docstring"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , n : int )-> str:
    """simple docstring"""
    self.n = n
    self.array = [None] * self.n
    self.front = 0 # index of the first element
    self.rear = 0
    self.size = 0
def __len__( self : Union[str, Any] )-> int:
    """simple docstring"""
    return self.size
def is_empty ( self : Dict )-> bool:
    """simple docstring"""
    return self.size == 0
def first ( self : List[Any] )-> Optional[int]:
    """simple docstring"""
    return False if self.is_empty() else self.array[self.front]
def enqueue ( self : Dict , data : int )-> Optional[int]:
    """simple docstring"""
    if self.size >= self.n:
        raise Exception("""QUEUE IS FULL""" )
    self.array[self.rear] = data
    self.rear = (self.rear + 1) % self.n
    self.size += 1
    return self
def dequeue ( self : Union[str, Any] )-> List[Any]:
    """simple docstring"""
    if self.size == 0:
        raise Exception("""UNDERFLOW""" )
    temp = self.array[self.front]
    self.array[self.front] = None
    self.front = (self.front + 1) % self.n
    self.size -= 1
    return temp
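# Hedged usage sketch of the circular queue above (the class name
# UpperCAmelCase_ is kept from this dump's obfuscation):
# q = UpperCAmelCase_(2)          # capacity 2
# q.enqueue(10).enqueue(20)
# assert q.first() == 10 and len(q) == 2
# assert q.dequeue() == 10 and q.dequeue() == 20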
| 470 | 1 |
from __future__ import annotations
def A__ ( value , weight , capacity ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
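# Hedged worked example (classic textbook instance, not from this file):
# A__([60, 100, 120], [10, 20, 30], 50) should yield a maximum value of 240.0,
# taking items 0 and 1 fully and 2/3 of item 2 (fractions [1, 1, 2/3]).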
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = '''▁'''
snake_case_ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
snake_case_ = {
'''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
snake_case_ = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class snake_case_ ( _A):
lowerCamelCase :Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :Union[str, Any] = ["input_ids", "attention_mask"]
lowerCamelCase :List[int] = []
lowerCamelCase :List[int] = []
def __init__( self , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase="<unk>" , __lowercase="m2m100" , __lowercase = None , __lowercase=8 , **__lowercase , ) -> None:
lowerCamelCase : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase : List[str] =language_codes
lowerCamelCase : int =FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase : str ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
lowerCamelCase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowercase , tgt_lang=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , language_codes=__lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowercase , **__lowercase , )
lowerCamelCase : Dict =vocab_file
lowerCamelCase : List[Any] =load_json(__lowercase )
lowerCamelCase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowerCamelCase : List[Any] =spm_file
lowerCamelCase : str =load_spm(__lowercase , self.sp_model_kwargs )
lowerCamelCase : Tuple =len(self.encoder )
lowerCamelCase : Optional[int] ={
self.get_lang_token(__lowercase ): self.encoder_size + i for i, lang_code in enumerate(__lowercase )
}
lowerCamelCase : Tuple ={lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowercase )}
lowerCamelCase : Tuple ={v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase : Optional[Any] =src_lang if src_lang is not None else '''en'''
lowerCamelCase : Any =tgt_lang
lowerCamelCase : List[Any] =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase : Optional[Any] =num_madeup_words
@property
def __lowercase ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowercase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Any =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self , __lowercase ) -> List[str]:
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def __lowercase ( self , __lowercase ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowercase , self.encoder[self.unk_token] )
def __lowercase ( self , __lowercase ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowercase , self.unk_token )
def __lowercase ( self , __lowercase ) -> str:
lowerCamelCase : Dict =[]
lowerCamelCase : Dict =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowercase ) + token
lowerCamelCase : str =[]
else:
current_sub_tokens.append(__lowercase )
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def __lowercase ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
lowerCamelCase : int =[1] * len(self.prefix_tokens )
lowerCamelCase : List[str] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones
def __lowercase ( self , __lowercase , __lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self ) -> Dict:
lowerCamelCase : Dict ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
lowerCamelCase : Optional[Any] =self.__dict__.copy()
lowerCamelCase : Union[str, Any] =None
return state
def __setstate__( self , __lowercase ) -> None:
lowerCamelCase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase : Optional[int] ={}
lowerCamelCase : Optional[Any] =load_spm(self.spm_file , self.sp_model_kwargs )
def __lowercase ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
lowerCamelCase : Optional[Any] =Path(__lowercase )
if not save_dir.is_dir():
raise OSError(F"{save_directory} should be a directory" )
lowerCamelCase : List[str] =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase : Tuple =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowercase )
elif not os.path.isfile(self.spm_file ):
with open(__lowercase , '''wb''' ) as fi:
lowerCamelCase : List[Any] =self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (str(__lowercase ), str(__lowercase ))
def __lowercase ( self , __lowercase , __lowercase = "en" , __lowercase = None , __lowercase = "ro" , **__lowercase , ) -> BatchEncoding:
lowerCamelCase : Union[str, Any] =src_lang
lowerCamelCase : Optional[int] =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def __lowercase ( self , __lowercase , __lowercase , __lowercase , **__lowercase ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCamelCase : List[Any] =src_lang
lowerCamelCase : str =self(__lowercase , add_special_tokens=__lowercase , **__lowercase )
lowerCamelCase : Tuple =self.get_lang_id(__lowercase )
lowerCamelCase : Optional[int] =tgt_lang_id
return inputs
def __lowercase ( self ) -> List[str]:
self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self ) -> List[str]:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Union[str, Any] =self.get_lang_token(__lowercase )
lowerCamelCase : List[Any] =self.lang_token_to_id[lang_token]
lowerCamelCase : Optional[Any] =[self.cur_lang_id]
lowerCamelCase : Union[str, Any] =[self.eos_token_id]
def __lowercase ( self , __lowercase ) -> None:
lowerCamelCase : Tuple =self.get_lang_token(__lowercase )
lowerCamelCase : Tuple =self.lang_token_to_id[lang_token]
lowerCamelCase : List[Any] =[self.cur_lang_id]
lowerCamelCase : Tuple =[self.eos_token_id]
def __lowercase ( self , __lowercase ) -> str:
return self.lang_code_to_token[lang]
def __lowercase ( self , __lowercase ) -> int:
lowerCamelCase : List[str] =self.get_lang_token(__lowercase )
return self.lang_token_to_id[lang_token]
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> sentencepiece.SentencePieceProcessor:
lowerCamelCase : List[Any] =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE_ )
spm.Load(str(SCREAMING_SNAKE_CASE_ ) )
return spm
def A__ ( SCREAMING_SNAKE_CASE_ ) -> Union[Dict, List]:
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=2 )
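# Hedged usage sketch for the M2M100 tokenizer above (the class is named
# snake_case_ in this dump; the checkpoint name comes from the pretrained map
# earlier in the block):
# tok = snake_case_.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tok("Hello world", return_tensors="pt")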
| 262 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = 1
lowerCamelCase__: Union[str, Any] = 3
lowerCamelCase__: str = (32, 32)
lowerCamelCase__: str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__: Dict = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__: Dict = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__: str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
return CLIPTextModel(__a )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: str = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale
lowerCamelCase__: Optional[Any] = DDPMScheduler()
lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCamelCase__: Tuple = self.dummy_vae
lowerCamelCase__: Optional[int] = self.dummy_text_encoder
lowerCamelCase__: Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase__: Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__: Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCamelCase__: Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: List[str] = """A painting of a squirrel eating a burger"""
lowerCamelCase__: Dict = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase__: Any = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCamelCase__: List[str] = output.images
lowerCamelCase__: Union[str, Any] = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase__: List[str] = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__a , )[0]
lowerCamelCase__: Tuple = image[0, -3:, -3:, -1]
lowerCamelCase__: int = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase__: int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase__: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: List[str] = self.dummy_cond_unet_upscale
lowerCamelCase__: Optional[int] = DDPMScheduler()
lowerCamelCase__: Any = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCamelCase__: List[str] = self.dummy_vae
lowerCamelCase__: Optional[Any] = self.dummy_text_encoder
lowerCamelCase__: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase__: str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__: List[str] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase__: Tuple = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCamelCase__: List[str] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: Any = """A painting of a squirrel eating a burger"""
lowerCamelCase__: str = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCamelCase__: Any = output.images
assert image.shape[0] == 2
lowerCamelCase__: Optional[Any] = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase__: Dict = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCamelCase__: Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: int = self.dummy_cond_unet_upscale
lowerCamelCase__: Dict = DDPMScheduler()
lowerCamelCase__: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCamelCase__: List[str] = self.dummy_vae
lowerCamelCase__: Tuple = self.dummy_text_encoder
lowerCamelCase__: int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase__: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__: Union[str, Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase__: Optional[int] = unet.half()
lowerCamelCase__: Optional[Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCamelCase__: List[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: Tuple = """A painting of a squirrel eating a burger"""
lowerCamelCase__: Optional[int] = torch.manual_seed(0 )
lowerCamelCase__: Optional[Any] = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="""np""" , ).images
lowerCamelCase__: Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__: Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCamelCase__: List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
lowerCamelCase__: Dict = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase__: List[Any] = """a cat sitting on a park bench"""
lowerCamelCase__: Dict = torch.manual_seed(0 )
lowerCamelCase__: Any = pipe(
prompt=__a , image=__a , generator=__a , output_type="""np""" , )
lowerCamelCase__: Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__: Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCamelCase__: int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
lowerCamelCase__: int = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCamelCase__: List[str] = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase__: Any = """a cat sitting on a park bench"""
lowerCamelCase__: Tuple = torch.manual_seed(0 )
lowerCamelCase__: Optional[int] = pipe(
prompt=__a , image=__a , generator=__a , output_type="""np""" , )
lowerCamelCase__: int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__: Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCamelCase__: Tuple = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__: str = """a cat sitting on a park bench"""
lowerCamelCase__: int = torch.manual_seed(0 )
lowerCamelCase__: Optional[Any] = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="""np""" , )
lowerCamelCase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 306 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
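        # The expected ids index into the toy vocab defined in setUp:
        # "un" -> 7, "##want" -> 4, "##ed" -> 5, "," -> 10, "runn" -> 8, "##ing" -> 9.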
    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 306 | 1 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity (intersection size over union size).

    Accepts sets, or lists/tuples. When ``alternative_union`` is True the
    denominator is len(set_a) + len(set_b) instead of the true union size.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection_length = len(set_a.intersection(set_b))
        if alternative_union:
            union_length = len(set_a) + len(set_b)
        else:
            union_length = len(set_a.union(set_b))
        return intersection_length / union_length

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union_length = len(set_a) + len(set_b)
            return len(intersection) / union_length
        else:
            # Build the union while preserving order and skipping duplicates from set_b.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
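# Worked example: for A = {0, 1, 2} and B = {1, 2, 3} the intersection has 2
# elements and the union has 4, so jaccard_similarity(A, B) == 0.5.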
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
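# Editorial note: the two checks above only reject obvious non-generators
# (candidates of order <= 2); they do not fully verify that g generates all of
# (Z/pZ)*. Example: for p = 7, g = 3 passes because 3**2 % 7 == 2, not 1.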
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 684 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """Configuration class for a Speech2Text2 decoder."""

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
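# Minimal usage sketch (illustrative only, assumes a local transformers install):
#
#     config = Speech2Text2Config(vocab_size=10_000, d_model=256)
#     config.save_pretrained("./s2t2-decoder")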
| 222 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: lazily yields the primes 2, 3, 5, ..."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; slide its recorded factor to the next multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
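# How the incremental sieve works: when a prime p is first produced, p*p is
# marked with factor p; whenever the walk reaches a marked composite, the mark
# is slid forward by its factor, so memory grows only with primes below sqrt(N).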
def solution(limit: float = 1e10) -> int:
    """Return the least odd index n whose remainder 2 * p_n * n first exceeds ``limit``.

    For odd n, (p_n - 1)**n + (p_n + 1)**n mod p_n**2 equals 2 * p_n * n.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 222 | 1 |
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: int
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
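# Worked example: simple_interest(1000, 0.0005, 30) == 1000 * 0.0005 * 30 == 15.0.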
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: int,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: int,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 144 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
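# Illustrative usage inside a user-defined dataclass (the dataclass and field
# names here are hypothetical, not part of the library):
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         lr: float = HfArg(default=3e-4, aliases=["--learning-rate"], help="step size")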
class HfArgumentParser(ArgumentParser):
    """Uses type hints on dataclasses to generate argparse arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 640 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
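# Sanity check: solution(3) == 12, since F(12) = 144 is the first 3-digit term.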
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 640 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
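# Background note on the rotary check above: apply_rotary_position_embeddings
# rotates each pair of feature channels by a position-dependent angle taken from
# the sinusoidal table, so relative positions show up as phase differences in
# query/key dot products; the expected tensors are the rotated inputs.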
| 720 |
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale PIL image using its global mean as the threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
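# The 256-level grayscale image collapses to a two-level image: pixels strictly
# above the global mean become 255 (white), all others become 0 (black).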
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 597 | 0 |
import numpy as np
class Cell:
    """A single grid cell with A* bookkeeping (g, h, f and a parent link)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the 8-connected, in-bounds neighbours of `cell`."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A*-style search over the grid; returns the path from start to goal."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            # Skip neighbours that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            # Keep an already-queued copy if it is at least as good.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
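# Note: h is the *squared* Euclidean distance, which can overestimate the true
# remaining cost on an 8-connected grid; the search still reaches the goal but
# is not guaranteed to return a shortest path.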
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 387 |
import numpy as np
class Cell:
    """A single grid cell with A* bookkeeping (g, h, f and a parent link)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the 8-connected, in-bounds neighbours of `cell`."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A*-style search over the grid; returns the path from start to goal."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            # Skip neighbours that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            # Keep an already-queued copy if it is at least as good.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
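# Note: h is the *squared* Euclidean distance, which can overestimate the true
# remaining cost on an 8-connected grid; the search still reaches the goal but
# is not guaranteed to return a shortest path.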
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 387 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 534 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
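# Worked example: sum_of_divisors(6) == 1 + 2 + 3 == 6, so 6 is a perfect number.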
if __name__ == "__main__":
import doctest
doctest.testmod()
| 534 | 1 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
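# These functions are torch.hub entry points; illustrative usage (network
# access and the listed dependencies required):
#     tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")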
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 90 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 90 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
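# Minimal usage sketch (illustrative): FNetConfig() builds the default
# "fnet-base"-sized configuration; pass overrides as keyword arguments, e.g.
# FNetConfig(num_hidden_layers=24) for a larger model.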
| 316 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
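# Illustrative call, assuming the default Donut checkpoint above is available
# locally or downloadable (variable names here are hypothetical):
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=invoice_image, question="What is the total?")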
| 316 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
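# The stub above only has to satisfy attribute lookups in tests that are
# skipped when Pillow is missing; it is never actually exercised.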
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : int = "hf-internal-testing/tiny-detr-mobilenetsv3"
lowercase__ : Dict = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
lowercase__ : List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
lowercase__ : Dict = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
lowercase__ : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
lowercase__ : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def __a ( self ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = "facebook/detr-resnet-50"
lowercase__ : int = AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
lowercase__ : Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase )
lowercase__ : Any = ObjectDetectionPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
lowercase__ : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowercase__ : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> List[Any]:
"""simple docstring"""
lowercase__ : str = "facebook/detr-resnet-50"
lowercase__ : str = pipeline("object-detection" , model=lowerCamelCase )
lowercase__ : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
lowercase__ : int = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = 0.99_85
lowercase__ : List[Any] = "facebook/detr-resnet-50"
lowercase__ : Tuple = pipeline("object-detection" , model=lowerCamelCase )
lowercase__ : Union[str, Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : int = "Narsil/layoutlmv3-finetuned-funsd"
lowercase__ : Any = 0.99_93
lowercase__ : Union[str, Any] = pipeline("object-detection" , model=lowerCamelCase , threshold=lowerCamelCase )
lowercase__ : int = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
| 397 |
from math import factorial
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> float:
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) or not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
lowercase__ : Dict = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
lowercase__ : Tuple = float(factorial(SCREAMING_SNAKE_CASE_ ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE_ ) * factorial(trials - successes )
return probability * coefficient
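# Added worked example (self-contained, so it does not depend on the mangled
# definition above): P(X = 2) for X ~ Binomial(n=4, p=0.75) is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
_check = (factorial(4) / (factorial(2) * factorial(2))) * 0.75**2 * 0.25**2
assert abs(_check - 0.2109375) < 1e-12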
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
| 397 | 1 |
'''simple docstring'''
import torch
from torch import nn
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , a__ , a__ , a__ , a__ , a__=1 , a__=False ):
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = n_token
__SCREAMING_SNAKE_CASE : List[Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_proj
__SCREAMING_SNAKE_CASE : Optional[int] = cutoffs + [n_token]
__SCREAMING_SNAKE_CASE : str = [0] + self.cutoffs
__SCREAMING_SNAKE_CASE : str = div_val
__SCREAMING_SNAKE_CASE : List[str] = self.cutoffs[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.cutoffs ) - 1
__SCREAMING_SNAKE_CASE : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.zeros(self.n_clusters ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList()
__SCREAMING_SNAKE_CASE : List[str] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a__ , a__ ) ) )
else:
self.out_projs.append(a__ )
self.out_layers.append(nn.Linear(a__ , a__ ) )
else:
for i in range(len(self.cutoffs ) ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE : str = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a__ , a__ ) ) )
self.out_layers.append(nn.Linear(a__ , r_idx - l_idx ) )
__SCREAMING_SNAKE_CASE : List[str] = keep_order
def a_ ( self , a__ , a__ , a__ , a__ ):
if proj is None:
__SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(a__ , a__ , bias=a__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__SCREAMING_SNAKE_CASE : Optional[int] = nn.functional.linear(a__ , proj.t().contiguous() )
__SCREAMING_SNAKE_CASE : List[str] = nn.functional.linear(a__ , a__ , bias=a__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def a_ ( self , a__ , a__=None , a__=False ):
if labels is not None:
# Shift so that tokens < n predict n
__SCREAMING_SNAKE_CASE : str = hidden[..., :-1, :].contiguous()
__SCREAMING_SNAKE_CASE : Tuple = labels[..., 1:].contiguous()
__SCREAMING_SNAKE_CASE : Tuple = hidden.view(-1 , hidden.size(-1 ) )
__SCREAMING_SNAKE_CASE : int = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
__SCREAMING_SNAKE_CASE : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE : List[str] = self._compute_logit(a__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = labels != -100
__SCREAMING_SNAKE_CASE : Any = torch.zeros_like(a__ , dtype=hidden.dtype , device=hidden.device )
__SCREAMING_SNAKE_CASE : str = (
-nn.functional.log_softmax(a__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__SCREAMING_SNAKE_CASE : Dict = nn.functional.log_softmax(a__ , dim=-1 )
else:
# construct weights and biases
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE : int = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE : Dict = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE : str = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a__ )
biases.append(a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE : Tuple = self._compute_logit(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(a__ , dim=1 )
if labels is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(a__ , dtype=hidden.dtype , device=hidden.device )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : str = [0] + self.cutoffs
for i in range(len(a__ ) - 1 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__SCREAMING_SNAKE_CASE : List[Any] = (labels >= l_idx) & (labels < r_idx)
__SCREAMING_SNAKE_CASE : List[str] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__SCREAMING_SNAKE_CASE : List[Any] = labels.index_select(0 , a__ ) - l_idx
__SCREAMING_SNAKE_CASE : Optional[int] = head_logprob.index_select(0 , a__ )
__SCREAMING_SNAKE_CASE : List[Any] = hidden.index_select(0 , a__ )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden
if i == 0:
if labels is not None:
__SCREAMING_SNAKE_CASE : Any = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE : List[Any] = self._compute_logit(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(a__ , dim=1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__SCREAMING_SNAKE_CASE : int = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , a__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def a_ ( self , a__ ):
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE : List[str] = self._compute_logit(a__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a__ , dim=-1 )
else:
# construct weights and biases
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE : Any = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE : int = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE : str = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE : str = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__SCREAMING_SNAKE_CASE : Dict = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a__ )
biases.append(a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE : str = self._compute_logit(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__SCREAMING_SNAKE_CASE : Any = nn.functional.log_softmax(a__ , dim=1 )
__SCREAMING_SNAKE_CASE : Tuple = [0] + self.cutoffs
for i in range(len(a__ ) - 1 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__SCREAMING_SNAKE_CASE : Any = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE : int = self._compute_logit(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.functional.log_softmax(a__ , dim=1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
__SCREAMING_SNAKE_CASE : Any = logprob_i
return out
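# --- Added note (illustration only) ---
# This module is transfo-xl's projected adaptive log-softmax: the vocabulary is
# split at `cutoffs` into a frequent head cluster plus rarer tail clusters, so
# most tokens only ever touch a small softmax. A smoke test under the
# pre-mangling name and the constructor signature shown above:
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64,
#                                      cutoffs=[100, 500])
#   hidden = torch.randn(2, 7, 64)
#   labels = torch.randint(0, 1000, (2, 7))
#   nll = crit(hidden, labels)  # per-token negative log-likelihoods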
| 564 |
'''simple docstring'''
def __A ( _SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 564 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
UpperCAmelCase__ : Optional[int] = 6_3_7_8_1_3_7.0
UpperCAmelCase__ : Any = 6_3_5_6_7_5_2.3_1_4_2_4_5
UpperCAmelCase__ : List[str] = 6_37_81_37
def A ( UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
'''simple docstring'''
lowerCAmelCase__ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowerCAmelCase__ = atan((1 - flattening) * tan(radians(UpperCamelCase_ ) ) )
lowerCAmelCase__ = atan((1 - flattening) * tan(radians(UpperCamelCase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowerCAmelCase__ = haversine_distance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowerCAmelCase__ = (b_lata + b_lata) / 2
lowerCAmelCase__ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowerCAmelCase__ = (sin(UpperCamelCase_ ) ** 2) * (cos(UpperCamelCase_ ) ** 2)
lowerCAmelCase__ = cos(sigma / 2 ) ** 2
lowerCAmelCase__ = (sigma - sin(UpperCamelCase_ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowerCAmelCase__ = (cos(UpperCamelCase_ ) ** 2) * (sin(UpperCamelCase_ ) ** 2)
lowerCAmelCase__ = sin(sigma / 2 ) ** 2
lowerCAmelCase__ = (sigma + sin(UpperCamelCase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
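# --- Added note ---
# Lambert's formula corrects the spherical haversine distance for Earth's
# flattening f = (AXIS_A - AXIS_B) / AXIS_A, cutting the error to roughly the
# 10 m scale over intercontinental ranges. Pre-mangling, the function above is
# typically named lamberts_ellipsoidal_distance and takes two (lat, lon) pairs
# in degrees:
#
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)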
| 48 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __magic_name__ ( self : List[Any] ) -> List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
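    # Added note: onnxruntime accepts each provider either as a plain name or as
    # a (name, options) tuple; the "gpu_mem_limit" above caps the CUDA arena at
    # 15 GB so the test does not reserve the entire device.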
@property
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple =ort.SessionOptions()
SCREAMING_SNAKE_CASE__ : Dict =False
return options
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
SCREAMING_SNAKE_CASE__ : List[str] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
SCREAMING_SNAKE_CASE__ : Tuple =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE__ : str =OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Any ='''A red cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__ : Optional[int] =np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__ : int =pipe(
prompt=__lowercase , image=__lowercase , mask_image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =output.images[0]
assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 296 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class a :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=64 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
_UpperCAmelCase : Dict = parent
_UpperCAmelCase : str = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Optional[Any] = is_training
_UpperCAmelCase : Optional[int] = use_input_mask
_UpperCAmelCase : Union[str, Any] = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : str = type_sequence_label_size
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[Any] = num_labels
_UpperCAmelCase : Tuple = num_choices
_UpperCAmelCase : Optional[int] = scope
def _UpperCAmelCase ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
_UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = MPNetModel(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : str = model(A_ , A_ )
_UpperCAmelCase : List[Any] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = MPNetForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Dict = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : str = MPNetForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.num_choices
_UpperCAmelCase : List[str] = MPNetForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Any = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Dict = MPNetForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Tuple = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = config_and_inputs
_UpperCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_lowercase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_lowercase = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = True
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = MPNetModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=A_ , hidden_size=37 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*A_ )
@require_torch
class a ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = MPNetModel.from_pretrained("microsoft/mpnet-base" )
_UpperCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCAmelCase : Optional[Any] = model(A_ )[0]
_UpperCAmelCase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A_ )
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
| 467 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: nn.ModuleList , lowerCAmelCase: nn.ModuleList , lowerCAmelCase: List[int] ) -> None:
_UpperCAmelCase : str = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F'{len(lowerCAmelCase )} != {len(lowerCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
SCREAMING_SNAKE_CASE_ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
SCREAMING_SNAKE_CASE_ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
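# Added sanity note (values read straight from the tables above; pre-mangling
# these dicts are named LAYERS_TO_COPY and LAYERS_TO_SUPERVISE): a 12-layer
# teacher donates layers [0, 6, 11] to a 3-layer student (first, middle, last),
# and those student layers are then supervised by teacher layers [3, 7, 11].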
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any , lowerCAmelCase: List[str] ) -> Dict:
try:
_UpperCAmelCase : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(lowerCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, PreTrainedModel] , lowerCAmelCase: Union[str, Path] = "student" , lowerCAmelCase: Union[int, None] = None , lowerCAmelCase: Union[int, None] = None , lowerCAmelCase: str=False , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Any=None , **lowerCAmelCase: str , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
_UpperCAmelCase : Dict = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
_UpperCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F'teacher must be a model or string got type {type(lowerCAmelCase )}'
_UpperCAmelCase : List[str] = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase : Any = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : int = teacher_e
if d is None:
_UpperCAmelCase : Optional[Any] = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
_UpperCAmelCase , _UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase : Dict = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[Any] = teacher_e
if d is None:
_UpperCAmelCase : Optional[int] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase )
_UpperCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
_UpperCAmelCase : List[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase : Any = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
_UpperCAmelCase : str = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
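# Added usage sketch: fire exposes the function above on the command line; from
# Python the (pre-mangling) call looks like the following, with the teacher
# checkpoint chosen purely for illustration:
#
#   student, copied_enc, copied_dec = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student-6-6", e=6, d=6
#   )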
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 467 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__magic_name__ =logging.get_logger(__name__)
__magic_name__ ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__magic_name__ =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __UpperCamelCase ( A , A , A , A , A ):
for attribute in key.split('''.''' ):
UpperCamelCase__ = getattr(A , A )
if weight_type is not None:
UpperCamelCase__ = getattr(A , A ).shape
else:
UpperCamelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = []
UpperCamelCase__ = fairseq_model.state_dict()
UpperCamelCase__ = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
UpperCamelCase__ = None
for name, value in fairseq_dict.items():
UpperCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
UpperCamelCase__ = True
elif name.split('''.''' )[0] == "proj":
UpperCamelCase__ = fairseq_model.proj
UpperCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCamelCase__ = True
if "*" in mapped_key:
UpperCamelCase__ = name.split(A )[0].split('''.''' )[-2]
UpperCamelCase__ = mapped_key.replace('''*''' , A )
if "weight_g" in name:
UpperCamelCase__ = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase__ = '''weight_v'''
elif "bias" in name:
UpperCamelCase__ = '''bias'''
elif "weight" in name:
UpperCamelCase__ = '''weight'''
else:
UpperCamelCase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def __UpperCamelCase ( A , A , A , A , A ):
UpperCamelCase__ = full_name.split('''conv_layers.''' )[-1]
UpperCamelCase__ = name.split('''.''' )
UpperCamelCase__ = int(items[0] )
UpperCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A )
def __UpperCamelCase ( A ):
UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
UpperCamelCase__ = nn.Linear(A , A , bias=A )
UpperCamelCase__ = emb.weight.data
return lin_layer
def __UpperCamelCase ( A ):
with open(A , '''r''' , encoding='''utf-8''' ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = [line.split(''' ''' )[0] for line in lines]
UpperCamelCase__ = len(A )
UpperCamelCase__ = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
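# Added note: for a fairseq dict.txt whose first two lines are "the 1234" and
# "of 999", the function above returns
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "of": 5}
# (the four specials first, then the corpus words offset by 4, in file order).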
@torch.no_grad()
def __UpperCamelCase ( A , A , A , A , A , A , A , ):
UpperCamelCase__ = WavaVecaConfig.from_pretrained(A )
UpperCamelCase__ = SpeechaTextaConfig.from_pretrained(
A , vocab_size=A , decoder_layers=A , do_stable_layer_norm=A )
UpperCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCamelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
UpperCamelCase__ = WavaVecaModel(A )
UpperCamelCase__ = recursively_load_weights_wavaveca(model.encoder , A )
UpperCamelCase__ = SpeechaTextaForCausalLM(A )
UpperCamelCase__ , UpperCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
UpperCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
UpperCamelCase__ = SpeechEncoderDecoderModel(encoder=A , decoder=A )
UpperCamelCase__ = False
# add projection layer
UpperCamelCase__ = nn.Parameter(projection_layer.weight )
UpperCamelCase__ = nn.Parameter(projection_layer.bias )
UpperCamelCase__ = create_vocab_dict(A )
with open(os.path.join(A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(A , A )
UpperCamelCase__ = SpeechaTextaTokenizer(os.path.join(A , '''vocab.json''' ) )
tokenizer.save_pretrained(A )
UpperCamelCase__ = hf_wavavec.config.to_dict()
UpperCamelCase__ = tokenizer.pad_token_id
UpperCamelCase__ = tokenizer.bos_token_id
UpperCamelCase__ = tokenizer.eos_token_id
UpperCamelCase__ = '''speech_to_text_2'''
UpperCamelCase__ = '''wav2vec2'''
UpperCamelCase__ = SpeechEncoderDecoderConfig.from_dict(A )
hf_wavavec.save_pretrained(A )
feature_extractor.save_pretrained(A )
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__magic_name__ =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 415 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ =get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__magic_name__ =250004
__magic_name__ =250020
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any =MBartTokenizer
SCREAMING_SNAKE_CASE_ : Any =MBartTokenizerFast
SCREAMING_SNAKE_CASE_ : Optional[int] =True
SCREAMING_SNAKE_CASE_ : Optional[Any] =True
def _a (self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = MBartTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _a (self ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCamelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCamelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
                # Checks it saves the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] ="facebook/mbart-large-en-ro"
SCREAMING_SNAKE_CASE_ : Any =[
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE_ : str =[82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def _a (cls ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
UpperCamelCase__ = 1
return cls
def _a (self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Dict:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
UpperCamelCase__ = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
UpperCamelCase__ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 10
UpperCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Optional[int]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0026, 25_0001] )
def _a (self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def _a (self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
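    # Added note: MBart's shift_tokens_right rotates each label row right by one,
    # moving the trailing language code to the front, so
    #   labels:            tok ... tok </s> ro_RO
    #   decoder_input_ids: ro_RO tok ... tok </s>
    # which is exactly what the asserts above and below rely on.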
@require_torch
def _a (self ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
UpperCamelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='''pt''' )
UpperCamelCase__ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='''pt''' )
UpperCamelCase__ = targets['''input_ids''']
UpperCamelCase__ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a (self ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
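

# --- Illustrative usage sketch (a minimal sketch, assuming a transformers version
# with `text_target` support; the public en-ro checkpoint name below is an
# assumption and may differ from the fixture checkpoint used in the tests above) ---
from transformers import MBartTokenizer

mbart_tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
translation_batch = mbart_tok(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# input_ids end with [eos, en_XX code]; labels end with [eos, ro_RO code],
# matching the prefix/suffix token assertions in the tests above.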
| 415 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
snake_case: Optional[Any] = 1
snake_case: List[Any] = 3
snake_case: List[str] = (32, 32)
snake_case: List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case: List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case: Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__lowerCamelCase )
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
def extract(*__lowerCamelCase , **__lowerCamelCase ):
class lowerCamelCase :
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
snake_case: Union[str, Any] = torch.ones([0] )
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
snake_case: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case: Union[str, Any] = self.dummy_cond_unet
snake_case: Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
snake_case: Optional[Any] = self.dummy_vae
snake_case: Optional[int] = self.dummy_text_encoder
snake_case: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
snake_case: List[str] = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
snake_case: Dict = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: int = """A painting of a squirrel eating a burger"""
snake_case: List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case: Optional[int] = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
snake_case: List[str] = output.images
snake_case: Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case: List[str] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCamelCase , )[0]
snake_case: Optional[Any] = image[0, -3:, -3:, -1]
snake_case: Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case: Optional[Any] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
snake_case: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case: str = self.dummy_cond_unet
snake_case: List[str] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
snake_case: Any = self.dummy_vae
snake_case: int = self.dummy_text_encoder
snake_case: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
snake_case: Optional[Any] = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
snake_case: str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: str = """A painting of a squirrel eating a burger"""
snake_case: Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case: List[str] = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
snake_case: int = output.images
snake_case: List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
snake_case: List[str] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCamelCase , )[0]
snake_case: int = image[0, -3:, -3:, -1]
snake_case: int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case: Union[str, Any] = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
snake_case: str = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(pipe.scheduler , __lowerCamelCase )
assert pipe.safety_checker is None
snake_case: Optional[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
snake_case: List[Any] = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case: int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case: List[str] = self.dummy_cond_unet
snake_case: Optional[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
snake_case: List[Any] = self.dummy_vae
snake_case: List[Any] = self.dummy_text_encoder
snake_case: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
snake_case: Tuple = unet.half()
snake_case: Optional[Any] = vae.half()
snake_case: Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
snake_case: str = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
snake_case: Dict = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: List[Any] = """A painting of a squirrel eating a burger"""
snake_case: Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
snake_case: str = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCamelCase )
snake_case: Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case: Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: List[str] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
snake_case: List[str] = 40_03_66_03_46
snake_case: List[str] = 7
# without safety guidance (sld_guidance_scale = 0)
snake_case: List[Any] = torch.manual_seed(__lowerCamelCase )
snake_case: str = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
snake_case: Optional[int] = output.images
snake_case: int = image[0, -3:, -3:, -1]
snake_case: Any = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
snake_case: Optional[Any] = torch.manual_seed(__lowerCamelCase )
snake_case: List[Any] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case: str = output.images
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Optional[Any] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
snake_case: int = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCamelCase )
snake_case: Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case: Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: str = """padme amidala taking a bath artwork, safe for work, no nudity"""
snake_case: List[str] = 27_34_97_17_55
snake_case: Tuple = 7
snake_case: int = torch.manual_seed(__lowerCamelCase )
snake_case: str = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
snake_case: Optional[int] = output.images
snake_case: Tuple = image[0, -3:, -3:, -1]
snake_case: Any = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
snake_case: str = torch.manual_seed(__lowerCamelCase )
snake_case: Optional[int] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case: List[str] = output.images
snake_case: List[str] = image[0, -3:, -3:, -1]
snake_case: Optional[int] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case: str = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
snake_case: int = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
snake_case: List[Any] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
snake_case: Tuple = 10_44_35_52_34
snake_case: List[Any] = 12
snake_case: str = torch.manual_seed(__lowerCamelCase )
snake_case: Optional[Any] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
snake_case: Optional[int] = output.images
snake_case: Any = image[0, -3:, -3:, -1]
snake_case: Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
snake_case: Union[str, Any] = torch.manual_seed(__lowerCamelCase )
snake_case: Optional[int] = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case: Any = output.images
snake_case: Any = image[0, -3:, -3:, -1]
snake_case: Dict = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
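

# --- Illustrative usage sketch (a minimal sketch of the safe-latent-diffusion
# knobs exercised in the nightly tests above; prompt and step counts are
# illustrative, not canonical) ---
import torch
from diffusers import StableDiffusionPipelineSafe

safe_pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
safe_generator = torch.manual_seed(0)
safe_out = safe_pipe(
    ["a photograph of an astronaut riding a horse"],
    generator=safe_generator,
    num_inference_steps=20,
    sld_guidance_scale=2000,  # 0 disables safety guidance entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
)
safe_image = safe_out.images[0]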
| 164 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of three candidate values, used as the pivot."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of array[low:high] around `pivot`; returns the split point."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    Introsort: quicksort with a recursion-depth budget, falling back to heapsort,
    and finishing small partitions with insertion sort.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
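

# --- Illustrative note (a sketch, not from the source): sort() runs quicksort
# until the depth budget 2 * ceil(log2(n)) is exhausted, then falls back to
# heapsort, and finishes partitions of at most 16 elements with insertion sort.
sample = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(list(sample)) == sorted(sample)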
| 164 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : str = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
lowercase = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowercase_ )
lowercase = False
lowercase = False
lowercase = False
lowercase = False
if "vqa" in checkpoint_url:
lowercase = True
lowercase = 3129
lowercase = """huggingface/label-files"""
lowercase = """vqa2-id2label.json"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
lowercase = ViltForQuestionAnswering(lowercase_ )
elif "nlvr" in checkpoint_url:
lowercase = True
lowercase = 2
lowercase = {0: """False""", 1: """True"""}
lowercase = {v: k for k, v in config.idalabel.items()}
lowercase = 3
lowercase = ViltForImagesAndTextClassification(lowercase_ )
elif "irtr" in checkpoint_url:
lowercase = True
lowercase = ViltForImageAndTextRetrieval(lowercase_ )
elif "mlm_itm" in checkpoint_url:
lowercase = True
lowercase = ViltForMaskedLM(lowercase_ )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" )["""state_dict"""]
lowercase = create_rename_keys(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ )
if mlm_model or irtr_model:
lowercase = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase , lowercase = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase_ )
# Define processor
lowercase = ViltImageProcessor(size=384 )
lowercase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowercase = ViltProcessor(lowercase_ , lowercase_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowercase_ ).raw )
lowercase = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=lowercase_ ).raw )
lowercase = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
lowercase = processor(lowercase_ , lowercase_ , return_tensors="""pt""" )
lowercase = processor(lowercase_ , lowercase_ , return_tensors="""pt""" )
lowercase = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=lowercase_ ).raw )
if mlm_model:
lowercase = """a bunch of [MASK] laying on a [MASK]."""
else:
lowercase = """How many cats are there?"""
lowercase = processor(lowercase_ , lowercase_ , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
# Verify outputs
if mlm_model:
lowercase = torch.Size([1, 11, 3_0522] )
lowercase = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase_ , atol=1E-4 )
# verify masked token prediction equals "cats"
lowercase = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase = torch.Size([1, 3129] )
lowercase = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase = torch.Size([1, 2] )
lowercase = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowercase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase_ : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
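

# --- Illustrative invocation (a sketch; the script and output directory names
# below are hypothetical placeholders, not taken from the source) ---
#
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm-converted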
| 588 |
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors of equal length."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    For every row of value_array, find the nearest row of dataset by euclidean
    distance and return a list of [vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
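

# --- Illustrative usage sketch (a minimal sketch on toy data) ---
toy_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
toy_queries = np.array([[0.0, 1.0]])
print(similarity_search(toy_dataset, toy_queries))  # [[[0.0, 0.0], 1.0]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # ~1.0 (parallel vectors)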
| 588 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowercase :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
def a_ ( self : int ):
"""simple docstring"""
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
A_ : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ : int = bbox[i, j, 3]
A_ : Any = bbox[i, j, 1]
A_ : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ : int = bbox[i, j, 2]
A_ : int = bbox[i, j, 0]
A_ : List[Any] = t
A_ : str = tf.convert_to_tensor(_lowerCamelCase )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[Any] = None
if self.use_token_type_ids:
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : int = None
A_ : Any = None
A_ : Optional[Any] = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A_ : Dict = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : str = TFLayoutLMModel(config=_lowerCamelCase )
A_ : str = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : Dict = model(_lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : List[Any] = TFLayoutLMForMaskedLM(config=_lowerCamelCase )
A_ : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : Union[str, Any] = self.num_labels
A_ : Union[str, Any] = TFLayoutLMForSequenceClassification(config=_lowerCamelCase )
A_ : int = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ):
"""simple docstring"""
A_ : List[Any] = self.num_labels
A_ : Tuple = TFLayoutLMForTokenClassification(config=_lowerCamelCase )
A_ : str = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Dict = TFLayoutLMForQuestionAnswering(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self : List[Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : List[str] = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[int] = 10
def a_ ( self : str ):
"""simple docstring"""
A_ : str = TFLayoutLMModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def a_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
@slow
def a_ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = TFLayoutLMModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowercase_ ( ):
"""simple docstring"""
A_ : Any = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
A_ : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
A_ : Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
A_ : Dict = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
A_ : Dict = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
A_ , A_ , A_ , A_ , A_ : List[str] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Optional[Any] = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
# test the sequence output on [0, :3, :3]
A_ : str = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
A_ : Optional[int] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCamelCase , atol=1E-3 ) )
@slow
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
A_ , A_ , A_ , A_ , A_ : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Optional[int] = model(
input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
A_ : Optional[Any] = outputs.loss
A_ : int = (2,)
self.assertEqual(loss.shape , _lowerCamelCase )
# test the shape of the logits
A_ : Optional[Any] = outputs.logits
A_ : List[Any] = (2, 2)
self.assertEqual(logits.shape , _lowerCamelCase )
@slow
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
A_ , A_ , A_ , A_ , A_ : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Optional[Any] = model(
input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
# test the shape of the logits
A_ : int = outputs.logits
A_ : List[str] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _lowerCamelCase )
@slow
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : List[str] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
A_ : Optional[Any] = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
# test the shape of the logits
A_ : int = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _lowerCamelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCamelCase )
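

# --- Illustrative usage sketch (a minimal sketch; the token ids below assume the
# standard bert-base-uncased vocab for "[CLS] hello world [SEP]") ---
layoutlm = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
demo_input_ids = tf.constant([[101, 7592, 2088, 102]])
demo_bbox = tf.constant(
    [[[0, 0, 0, 0], [637, 773, 693, 782], [698, 773, 733, 782], [1000, 1000, 1000, 1000]]]
)  # boxes are normalized to a 0-1000 page coordinate space, as in the tests above
demo_outputs = layoutlm(input_ids=demo_input_ids, bbox=demo_bbox)
print(demo_outputs.last_hidden_state.shape)  # (1, 4, 768)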
| 701 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase ( __UpperCAmelCase , unittest.TestCase):
__lowerCAmelCase : Tuple = XLNetTokenizer
__lowerCAmelCase : Optional[int] = XLNetTokenizerFast
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Any = True
def a_ ( self : List[str] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Optional[Any] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[int] = '''<s>'''
A_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(_lowerCamelCase ) , 10_06 )
def a_ ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [2_85, 46, 10, 1_70, 3_82] )
A_ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A_ : int = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
A_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase )
A_ : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase )
A_ : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def a_ ( self : Dict ):
"""simple docstring"""
A_ : int = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
A_ : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Dict = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: 
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
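

# --- Illustrative usage sketch (a minimal sketch of the special-token layout
# asserted above: each segment is followed by <sep> (id 4) and the pair ends
# with <cls> (id 3)) ---
from transformers import XLNetTokenizer

xlnet_tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
seq_1 = xlnet_tok.encode("sequence builders", add_special_tokens=False)
seq_2 = xlnet_tok.encode("multi-sequence build", add_special_tokens=False)
pair_ids = xlnet_tok.build_inputs_with_special_tokens(seq_1, seq_2)
assert pair_ids == seq_1 + [4] + seq_2 + [4, 3]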
| 361 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(A_ )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def __lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(config=A_ )
for name, module in model.named_modules():
if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : int ):
a = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(A_ , A_ ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a = self.model_tester.num_stages
self.assertEqual(len(A_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a = layer_type
a = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(A_ , A_ , A_ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = BitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _a ( ) -> int:
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
a = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ )
# forward pass
with torch.no_grad():
a = model(**A_ )
# verify the logits
a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
a = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
@require_torch
class lowercase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (BitBackbone,) if is_torch_available() else ()
__snake_case = BitConfig
__snake_case = False
def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
a = BitModelTester(self )
| 117 |
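# Hedged sketch of exercising the BiT backbone the tests above cover, reusing the tiny
# configuration values from BitModelTester (random weights, so no download is needed):
import torch
from transformers import BitConfig, BitBackbone

tiny_config = BitConfig(
    embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1],
    num_groups=1, out_features=["stage2", "stage3", "stage4"],
)
tiny_backbone = BitBackbone(tiny_config)
tiny_backbone.eval()
with torch.no_grad():
    feats = tiny_backbone(torch.randn(1, 3, 32, 32)).feature_maps
print([tuple(f.shape) for f in feats])  # one feature map per requested stage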
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _snake_case ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def _snake_case ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def _snake_case ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('https://huggingface.co' )
| 91 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase : Union[str, Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase : Optional[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase : Union[str, Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase ( self : Any ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def UpperCamelCase ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]="auto" , lowerCAmelCase_ : List[Any]=-1 , lowerCAmelCase_ : str=0.9 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : List[str]=5_00 , lowerCAmelCase_ : Optional[int]="gpt2-large" , lowerCAmelCase_ : Union[str, Any]=-1 , lowerCAmelCase_ : Dict=10_24 , lowerCAmelCase_ : Optional[int]=25 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Tuple=25 , ) -> Dict:
UpperCAmelCase_ = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 718 |
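# Usage sketch for the MAUVE wrapper above, mirroring the example embedded in its own
# _KWARGS_DESCRIPTION docstring (downloads a GPT-2 featurizer, so illustration only):
#
#     import datasets
#     mauve = datasets.load_metric("mauve")
#     out = mauve.compute(predictions=["hello there", "general kenobi"],
#                         references=["hello there", "general kenobi"])
#     print(out.mauve)  # 1.0 for identical distributions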
def binary_recursive ( decimal :int ):
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number :str ):
    number = str(number ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 407 | 0 |
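# Quick checks for the recursive decimal-to-binary converter above (names follow the
# repaired binary_recursive / main definitions; purely illustrative):
assert binary_recursive(11) == "1011"   # 11 = 8 + 2 + 1
assert main(" -7 ") == "-0b111"         # whitespace stripped, sign preserved
assert main("0") == "0b0"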
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ) -> Dict:
snake_case = WavaVecaForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
snake_case = downstream_dict['''projector.weight''']
snake_case = downstream_dict['''projector.bias''']
snake_case = downstream_dict['''model.post_net.linear.weight''']
snake_case = downstream_dict['''model.post_net.linear.bias''']
return model
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> str:
snake_case = WavaVecaForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
snake_case = downstream_dict['''model.linear.weight''']
snake_case = downstream_dict['''model.linear.bias''']
return model
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Any:
snake_case = WavaVecaForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
snake_case = downstream_dict['''connector.weight''']
snake_case = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
snake_case = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
snake_case = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] ) -> Tuple:
snake_case = torch.load(lowerCamelCase_ , map_location="""cpu""" )
snake_case = checkpoint['''Downstream''']
snake_case = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
snake_case = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
snake_case = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith("""ForXVector""" ):
snake_case = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
snake_case = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 369 |
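# Example invocation of the S3PRL conversion script above, using the argparse flags it
# defines (the script filename and all paths are placeholders for illustration):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted_model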
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@slow
    def test_small_integration_test(self ) -> int:
        """simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' ,return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' ,return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels ,model.config.pad_token_id ,model.config.decoder_start_token_id )
        logits = model(input_ids ,decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits ,onehot(labels ,logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 647 | 0 |
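# Tiny numeric sketch of the optax loss pattern used in the test above: per-token
# softmax cross entropy against one-hot labels, then averaged (illustration only):
import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

demo_logits = jnp.array([[[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]]])  # (batch, length, vocab)
demo_labels = jnp.array([[0, 1]])
demo_loss = optax.softmax_cross_entropy(demo_logits, onehot(demo_labels, demo_logits.shape[-1])).mean()
print(float(demo_loss))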
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
UpperCamelCase__ = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , '''sklearn''' )
return (preds == labels).mean()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , '''sklearn''' )
UpperCAmelCase__ : Any = simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , '''sklearn''' )
UpperCAmelCase__ : Any = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
UpperCAmelCase__ : List[str] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , '''sklearn''' )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ), F"""Predictions and labels have mismatched lengths {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "qqp":
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , '''sklearn''' )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(F"""Predictions and labels have mismatched lengths {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(lowerCAmelCase__ )
| 312 |
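# Numeric illustration of the metric helpers above, computed directly with
# numpy/scipy/sklearn (all names here are local to this sketch):
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score

demo_preds = np.array([1, 0, 1, 1])
demo_labels = np.array([1, 0, 0, 1])
demo_acc = (demo_preds == demo_labels).mean()              # 0.75
demo_f1 = f1_score(y_true=demo_labels, y_pred=demo_preds)  # 0.8
print({"acc": demo_acc, "f1": demo_f1, "acc_and_f1": (demo_acc + demo_f1) / 2})

xs = [1.0, 2.0, 3.0, 4.0]
ys = [1.1, 1.9, 3.2, 3.9]
print({"pearson": pearsonr(xs, ys)[0], "spearmanr": spearmanr(xs, ys)[0]})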
'''simple docstring'''
def fizz_buzz( number , iterations ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312 | 1 |
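# Spot check for the fizz_buzz helper above (assumes the repaired
# fizz_buzz(number, iterations) signature; note the trailing space after each round):
assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "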
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""test""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""" )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 135 |
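# The parser above backs the `accelerate test` subcommand; equivalent shell usage
# (the config path is a placeholder):
#
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml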
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple ={'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : Any ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : Optional[Any] ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : Optional[Any] ='▁'
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_VOCAB_FILES_MAP
A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , A = None , **A , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
A: Optional[int] = (
AddedToken(A , lstrip=A , rstrip=A , normalized=A )
if isinstance(A , A )
else mask_token
)
A: Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
A: Tuple = do_lower_case
A: Optional[Any] = remove_space
A: int = keep_accents
A: str = vocab_file
A: Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def a__ ( self ) -> Dict:
return len(self.sp_model )
def a__ ( self ) -> Any:
A: Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
A: List[Any] = self.__dict__.copy()
A: List[str] = None
return state
def __setstate__( self , A ) -> Dict:
A: str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A: Tuple = {}
A: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self , A ) -> List[Any]:
if self.remove_space:
A: str = """ """.join(inputs.strip().split() )
else:
A: Optional[Any] = inputs
A: int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A: Tuple = unicodedata.normalize("""NFKD""" , A )
A: Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
A: Tuple = outputs.lower()
return outputs
def a__ ( self , A ) -> List[str]:
A: List[str] = self.preprocess_text(A )
A: str = self.sp_model.encode(A , out_type=A )
A: str = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A: Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A: List[Any] = cur_pieces[1:]
else:
A: Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def a__ ( self , A ) -> Tuple:
return self.sp_model.PieceToId(A )
def a__ ( self , A ) -> List[str]:
return self.sp_model.IdToPiece(A )
def a__ ( self , A ) -> Any:
A: Any = []
A: Dict = """"""
A: int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
A: Dict = True
A: str = []
else:
current_sub_tokens.append(A )
A: List[Any] = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def a__ ( self , A , A = None ) -> List[int]:
A: Any = [self.sep_token_id]
A: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def a__ ( self , A , A = None ) -> List[int]:
A: List[str] = [self.sep_token_id]
A: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A: Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
A: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 135 | 1 |
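# Hedged usage sketch for the SentencePiece tokenizer above via the standard
# transformers API (model download assumed, so shown as a comment):
#
#     from transformers import AlbertTokenizer
#     tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#     enc = tok("Hello world", "second segment")
#     # build_inputs_with_special_tokens gives [CLS] A [SEP] B [SEP];
#     # token_type_ids are 0 over the first segment and 1 over the second.
#     print(enc["input_ids"], enc["token_type_ids"])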
'''simple docstring'''
import sys
from collections import defaultdict
class Heap :
    def __init__( self : Any ) -> Dict:
        """simple docstring"""
        self.node_position = []
    def get_position( self : int , vertex : List[str] ) -> Dict:
"""simple docstring"""
return self.node_position[vertex]
    def set_position( self : Optional[Any] , vertex : Any , pos : List[str] ) -> Optional[Any]:
        """simple docstring"""
        self.node_position[vertex] = pos
    def top_to_bottom( self : List[Any] , heap : List[str] , start : Dict , size : Dict , positions : List[Any] ) -> Optional[int]:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self : Union[str, Any] , val : Tuple , index : Tuple , heap : Union[str, Any] , position : List[Any] ) -> Optional[int]:
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self : Union[str, Any] , heap : int , positions : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self : Dict , heap : List[Any] , positions : Dict ) -> Optional[int]:
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm (adjacency_list : list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 704 |
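# Minimal, self-contained sketch of the same idea as the heap-based Prim's
# implementation above, written with heapq (an illustration, not the row's code):
import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    visited = {start}
    edges = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(edges)
    mst = []
    while edges:
        w, u, v = heapq.heappop(edges)
        if v not in visited:
            visited.add(v)
            mst.append((u, v))
            for nxt, w2 in adjacency[v]:
                if nxt not in visited:
                    heapq.heappush(edges, (w2, v, nxt))
    return mst

demo_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 1)]:
    demo_graph[u].append((v, w))
    demo_graph[v].append((u, w))
assert prim_mst(demo_graph) == [(0, 1), (1, 2), (2, 3)]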
'''simple docstring'''
import math
import unittest
def is_prime (number : int ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 377 | 0 |
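# Why the 6k +/- 1 stride above suffices: any integer is 6k + r with r in 0..5, and
# r in {0, 2, 3, 4} makes it divisible by 2 or 3, so primes > 3 satisfy r in {1, 5}.
# Tiny sanity check (uses the repaired is_prime from the row above):
assert [p for p in range(30) if is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]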
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_A : Optional[int] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : Tuple , snake_case_ : int=None , snake_case_ : Any=None , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , snake_case_ : Any=None , snake_case_ : Tuple=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
__lowerCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowerCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowerCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowercase :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , ) -> Optional[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = initializer_range
def a ( self : List[str] ) -> List[str]:
__lowerCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__lowerCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__lowerCAmelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
__lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def a ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
__lowerCAmelCase = 20
__lowerCAmelCase = model_class_name(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase , __lowerCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
__lowerCAmelCase = 20
__lowerCAmelCase = model_class_name(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase , __lowerCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = 99
def a ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def a ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_config_and_data()
__lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = lm_model(input_ids=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__lowerCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__lowerCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__lowerCAmelCase = lm_model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__lowerCAmelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
__lowerCAmelCase = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum()
__lowerCAmelCase = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(SCREAMING_SNAKE_CASE__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowercase ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_SCREAMING_SNAKE_CASE : str = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def a ( self : int ) -> Tuple:
__lowerCAmelCase = FlaxBlenderbotSmallModelTester(self )
def a ( self : Tuple ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE__ )
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
def a ( self : Tuple ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__lowerCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
return model.decode(
decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a ( self : Optional[Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
__lowerCAmelCase = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 427 |
'''simple docstring'''
import argparse
import os
import re
_A : str = '''src/transformers'''
# Pattern that looks at the indentation in a line.
_A : List[str] = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_A : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_A : Dict = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_A : Union[str, Any] = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_A : Union[str, Any] = re.compile(r'''\[([^\]]+)\]''')
def get_indent( line : List[str] ) -> Union[str, Any]:
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code : Any , indent_level : Optional[int]="" , start_prompt : str=None , end_prompt : Dict=None ) -> Optional[Any]:
    '''simple docstring'''
    index = 0
    lines = code.split("""\n""" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["""\n""".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
                current_block.append(lines[index] )
                blocks.append("""\n""".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("""\n""".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("""\n""".join(lines[index:] ) )
    return blocks
def ignore_underscore( key : str ) -> Optional[int]:
    '''simple docstring'''
    def _inner(x : Union[str, Any] ):
        return key(x ).lower().replace("""_""" , """""" )
    return _inner
def sort_objects( objects : List[Any] , key : List[Any]=None ) -> Any:
    '''simple docstring'''
    def noop(x : Union[str, Any] ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
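
# Illustrative behaviour of sort_objects above (the object names are invented
# for the example): constants sort first, classes second, functions last, each
# group alphabetized while ignoring underscores.
#
#   sort_objects(["load_tool", "BertModel", "BERT_START_DOCSTRING", "Agent"])
#   -> ['BERT_START_DOCSTRING', 'Agent', 'BertModel', 'load_tool']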
| 427 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or None."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 711 |
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt the text into a list of integers using a random one-time pad."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the original text from the cipher and the key."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
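

def _onepad_roundtrip_demo() -> None:
    # Round-trip sanity sketch (the message is invented): decrypting with the
    # generated key must reproduce the original plaintext exactly.
    message = "attack at dawn"
    cipher, key = Onepad.encrypt(message)
    assert Onepad.decrypt(cipher, key) == message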
| 632 | 0 |
def triangle_number_generator():
for n in range(1 ,1000000 ):
yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
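

# Optional speed-up sketch (an addition for illustration, not part of the
# original solution): n and n + 1 are coprime, so the divisor count of the
# triangle number n * (n + 1) / 2 factors into two much cheaper divisor counts.
def solution_fast(limit: int = 500) -> int:
    n = 1
    while True:
        a, b = (n // 2, n + 1) if n % 2 == 0 else (n, (n + 1) // 2)
        if count_divisors(a) * count_divisors(b) > limit:
            return n * (n + 1) // 2
        n += 1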
| 171 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
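
# Hypothetical invocation via python-fire (the script and file names below are
# placeholders, not from the original repository):
#   python calculate_rouge_path.py /path/to/preds.txt /path/to/refs.txt --save_path metrics.json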
| 171 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
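

# Minimal sketch of how TokenClassificationTask is meant to be specialized: a
# simplified CoNLL-style reader. The one-token-per-line file layout below is an
# assumption for illustration, not part of this module.
class NER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        guid_index = 1
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode}.txt"), encoding="utf-8") as f:
            for line in f:
                if line.startswith("-DOCSTART-") or line.strip() == "":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words, labels = [], []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    # default to "O" when a token carries no label
                    labels.append(splits[-1].strip() if len(splits) > 1 else "O")
        if words:
            examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples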
| 646 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
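
# Example invocation (the paths are placeholders; the flags are the ones
# defined by the parser above):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan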
| 646 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F"""Building PyTorch model from configuration: {config}""" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
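
# Example invocation (the paths are placeholders; the flags are the ones
# defined by the parser above):
#   python convert_bigbird.py --tf_checkpoint_path ./bigbird.ckpt \
#       --big_bird_config_file ./config.json --pytorch_dump_path ./pytorch_model --is_trivia_qa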
| 48 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
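
# Worked example (the sentence is invented for illustration): with max_width=16
# the spaces are distributed round-robin from the left, and the final line is
# left-justified and padded.
#
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']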
| 48 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class ControlNetMultiControlNetImg2ImgPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
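
# Hedged end-to-end sketch mirroring the slow test above (the checkpoint ids
# are the real ones referenced in the test; the prompt and images are
# illustrative):
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
#   )
#   pipe.enable_model_cpu_offload()
#   result = pipe(prompt, image, control_image=control_image, strength=0.6).images[0]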
| 50 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-50-one-to-many-mmt""": (
            """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-50-one-to-many-mmt""": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
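
# Hedged usage sketch (the checkpoint id is the real one from the pretrained
# maps above; the sentence is invented):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # input_ids begin with the en_XX language code and end with </s>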
| 50 | 1 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
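

# Equivalent vectorized sketch of the level mapping above (an illustration with
# numpy only; the function name and rounding convention are assumptions and may
# differ slightly from the loop-based version):
def equalize_sketch(img):
    hist = np.bincount(img.ravel(), minlength=256)
    cdf = hist.cumsum() / hist.sum()  # running fraction of pixels per gray level
    lut = np.round(255 * cdf).astype(np.uint8)  # level -> stretched level
    return lut[img]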
| 49 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
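
# Minimal illustration of the lazy-module idea used above (a simplified sketch,
# not the actual transformers._LazyModule implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)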
| 49 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split('.' )

        if layer == "0":
            new_name = old_name.replace('0' , 'convolution1' )
        elif layer == "1":
            new_name = old_name.replace('1' , 'batchnorm_before' )
        elif layer == "3":
            new_name = old_name.replace('3' , 'convolution2' )
        else:
            new_name = old_name.replace('4' , 'batchnorm_after' )

    if "network" in old_name and re.search(r'\d\.\d' , old_name ):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(r'\d\.\d\d.' , old_name ).group()
        else:
            match = re.search(r'\d\.\d.' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '' )
            trimmed_name = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1' , 'layernorm1' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2' , 'layernorm2' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1' , 'linear_in' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2' , 'linear_out' )

            new_name = 'last_stage.' + trimmed_name

    elif "network" in old_name and re.search(r'.\d.' , old_name ):
        new_name = old_name.replace('network' , 'intermediate_stages' )

    if "fc" in new_name:
        new_name = new_name.replace('fc' , 'convolution' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1' , 'batchnorm_before' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2' , 'batchnorm_after' )
    if "proj" in new_name:
        new_name = new_name.replace('proj' , 'projection' )
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head' , 'distillation_classifier' )
    elif "head" in new_name:
        new_name = new_name.replace('head' , 'classifier' )
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name

    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm' , 'layernorm' )
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
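# --- Illustrative invocation (editor's sketch, not part of the original script) ---
# All file paths below are placeholders for a locally downloaded EfficientFormer
# checkpoint and its JSON config (the l1/l3/l7 variants handled above); adjust
# the script name to this file's actual name:
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub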
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
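# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Constructs a deliberately tiny configuration; every value below is an
# arbitrary example, not a published checkpoint's setting.
if __name__ == "__main__":
    demo_config = GPTNeoXJapaneseConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4
    )
    print(demo_config.hidden_size, demo_config.rotary_emb_base)  # 64 10000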
| 651 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket; signal failure if the slot is taken."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
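# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Exercises insertion, lookup, deletion (tombstoning via `_deleted`) and the
# MutableMapping helpers the class inherits.
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap()
    hm["one"], hm["two"], hm["three"] = 1, 2, 3
    assert hm["two"] == 2 and len(hm) == 3
    del hm["two"]  # slot becomes a tombstone, so linear probing still works
    assert "two" not in hm and sorted(hm) == ["one", "three"]
    print(hm)  # e.g. HashMap(one: 1 ,three: 3)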
| 653 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
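# --- Worked example (editor's sketch, not part of the original metric file) ---
# CER = (S + D + I) / (S + D + C). For reference "abc" vs prediction "axc"
# there is one substitution and two hits, so CER = 1 / 3. This relies on the
# jiwer < 3.0 `compute_measures` API already used above.
if __name__ == "__main__":
    measures = jiwer.compute_measures(
        "abc", "axc", truth_transform=cer_transform, hypothesis_transform=cer_transform
    )
    incorrect = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.333...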
| 317 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 317 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
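# --- Illustrative usage (editor's sketch, not part of the original module) ---
# scale() standardizes CLIP image embeddings with the learned mean/std and
# unscale() inverts it; with the freshly initialized zero mean and unit std
# the round trip is the identity.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    roundtrip = normalizer.unscale(normalizer.scale(embeds))
    assert torch.allclose(roundtrip, embeds, atol=1e-6)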
| 10 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 536 | 0 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
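# --- Illustrative invocation (editor's sketch, not part of the original script) ---
# File names below are arbitrary; the input maps benchmark file names to
# {metric: {"new": ..., "old": ..., "diff": ...}} dictionaries:
#
#   echo '{"benchmarks/a.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}' > results.json
#   python format_json_to_md.py results.json results.md   # adjust to this file's name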
| 710 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 345 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
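# --- Illustrative invocation (editor's sketch, not part of the original script) ---
# `model.pt` stands in for a downloaded fairseq mBART checkpoint; adjust the
# script name to this file's actual name:
#
#   python convert_mbart_original_checkpoint_to_pytorch.py model.pt mbart-cc25-hf \
#       --hf_config facebook/mbart-large-cc25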
| 476 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
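# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Applies the invisible watermark to a batch of images scaled to [-1, 1];
# anything narrower than 256 px passes through unchanged, per the guard above.
if __name__ == "__main__":
    watermarker = StableDiffusionXLWatermarker()
    images = torch.zeros(1, 3, 256, 256)
    print(watermarker.apply_watermark(images).shape)  # torch.Size([1, 3, 256, 256])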
| 476 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 643 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
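# --- Editor's note (illustrative, not part of the original module) ---
# The command registers under the `transformers-cli` entry point and, as the
# deprecation warning above states, `add-new-model-like` is the replacement:
#
#   transformers-cli add-new-model            # interactive cookiecutter flow
#   transformers-cli add-new-model --testing --testing_file config.json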
| 643 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
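    # The tests above exercise the same public API you would use directly; a
    # minimal, hedged sketch (model name and image URL are the ones used in the
    # slow tests above):
    #
    #   detector = pipeline("zero-shot-object-detection")
    #   detector(
    #       "http://images.cocodataset.org/val2017/000000039769.jpg",
    #       candidate_labels=["cat", "remote", "couch"],
    #       threshold=0.2,  # drop detections scoring below 0.2
    #       top_k=2,        # keep at most the two best detections
    #   )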
| 233 | """simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Pad tokens in the labels are replaced by -100 so the loss ignores them.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
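        # After training, evaluation reuses the same trainer; a minimal sketch,
        # assuming the objects above are still in scope:
        #
        #   metrics = trainer.evaluate()
        #   print(metrics["eval_accuracy"])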
| 277 | 0 |
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
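# A test opts into the marker registered in pytest_configure above like this
# (hypothetical test name):
#
#   @pytest.mark.torchaudio_latest
#   def test_resampling_with_recent_torchaudio():
#       ...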
| 152 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the number above."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
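# Sanity check: the published answer to Project Euler problem 8 is 23514624000.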
| 152 | 1 |
'''Close GitHub issues on huggingface/diffusers that have gone stale.'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 75 | '''simple docstring'''
def power(base: int, exponent: int) -> float:
    """Raise base to a non-negative integer exponent using recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
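# Example: power(3, 4) returns 81; for a negative exponent such as -2 the main
# block above computes 1 / power(base, 2) instead.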
| 546 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( snake_case__ , unittest.TestCase ):
__A : Tuple = ProphetNetTokenizer
__A : Tuple = False
def __snake_case ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowercase :Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowercase :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
lowercase :List[Any] = 'UNwant\u00E9d,running'
lowercase :List[str] = 'unwanted, running'
return input_text, output_text
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Tuple = self.tokenizer_class(self.vocab_file )
lowercase :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Optional[int] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :List[Any] = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowercase :Optional[int] = {}
for i, token in enumerate(_A ):
lowercase :Tuple = i
lowercase :Tuple = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :int = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
lowercase :Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowercase :str = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
lowercase :str = tokenizer(_A , padding=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
lowercase :List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __snake_case ( self : Any ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self : int ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
lowercase :Any = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
lowercase :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
lowercase :str = tokenizer.build_inputs_with_special_tokens(_A )
lowercase :Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
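# Direct usage of the tokenizer under test; a minimal sketch (requires network
# access to download the pretrained files):
#
#   tok = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
#   tok("A long paragraph for summarization.").input_ids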
| 711 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase () -> tuple[list[int], int]:
lowercase :Any = [randint(-1000 , 1000) for i in range(10)]
lowercase :Any = randint(-5000 , 5000)
return (arr, r)
UpperCAmelCase = make_dataset()
def lowerCamelCase (a_ :list[int] , a_ :int) -> tuple[int, ...]:
for triplet in permutations(a_ , 3):
if sum(a_) == target:
return tuple(sorted(a_))
return (0, 0, 0)
def lowerCamelCase (a_ :list[int] , a_ :int) -> tuple[int, int, int]:
arr.sort()
lowercase :Union[str, Any] = len(a_)
for i in range(n - 1):
lowercase , lowercase :Union[str, Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowerCamelCase () -> tuple[float, float]:
lowercase :int = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
lowercase :Optional[Any] = '''
triplet_sum1(*dataset)
'''
lowercase :Union[str, Any] = '''
triplet_sum2(*dataset)
'''
lowercase :Dict = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000)
lowercase :Optional[int] = repeat(setup=a_ , stmt=a_ , repeat=5 , number=1_0000)
return (min(a_), min(a_))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 475 | 0 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__magic_name__ = pytest.mark.integration
@pytest.mark.parametrize('path',['paws', 'csv'] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
inspect_dataset(UpperCAmelCase__,UpperCAmelCase__ )
a__ = path + '.py'
assert script_name in os.listdir(UpperCAmelCase__ )
assert "__pycache__" not in os.listdir(UpperCAmelCase__ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path',['accuracy'] )
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
inspect_metric(UpperCAmelCase__,UpperCAmelCase__ )
a__ = path + '.py'
assert script_name in os.listdir(UpperCAmelCase__ )
assert "__pycache__" not in os.listdir(UpperCAmelCase__ )
@pytest.mark.parametrize(
'path, config_name, expected_splits',[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
a__ = get_dataset_config_info(UpperCAmelCase__,config_name=UpperCAmelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception',[
('paws', None, ValueError),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> int:
'''simple docstring'''
with pytest.raises(UpperCAmelCase__ ):
get_dataset_config_info(UpperCAmelCase__,config_name=UpperCAmelCase__ )
@pytest.mark.parametrize(
'path, expected',[
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> str:
'''simple docstring'''
a__ = get_dataset_config_names(UpperCAmelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config',[
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
a__ = get_dataset_infos(UpperCAmelCase__ )
assert list(infos.keys() ) == expected_configs
a__ = expected_configs[0]
assert expected_config in infos
a__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits',[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ = get_dataset_infos(UpperCAmelCase__ )
assert expected_config in infos
a__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception',[
('paws', None, ValueError),
],)
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Dict:
'''simple docstring'''
with pytest.raises(UpperCAmelCase__ ):
get_dataset_split_names(UpperCAmelCase__,config_name=UpperCAmelCase__ )
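# The same helpers are convenient interactively; a minimal sketch (requires
# network access):
#
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad")  # -> ["train", "validation"]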
| 232 | """simple docstring"""
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
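        # Note: the interpolate_pos_encoding=True path exercised above is what
        # lets ViT accept image sizes other than its pre-training resolution:
        # the learned position embeddings are resized to match the new number
        # of patches.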
| 232 | 1 |
"""simple docstring"""
import os
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ = "input.txt" ):
with open(os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) ) as input_file:
SCREAMING_SNAKE_CASE = [
[int(SCREAMING_SNAKE_CASE_ ) for element in line.split(',' )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = len(matrix[0] )
SCREAMING_SNAKE_CASE = [[-1 for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1, SCREAMING_SNAKE_CASE_ ):
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
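# Sanity check: for the 5x5 example matrix in the problem statement the minimal
# path sum is 994; the published answer for the full 80x80 input is 260324.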
| 406 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = 1.5
SCREAMING_SNAKE_CASE = int(factor * num_class_images )
SCREAMING_SNAKE_CASE = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=SCREAMING_SNAKE_CASE_, aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''', exist_ok=SCREAMING_SNAKE_CASE_ )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
SCREAMING_SNAKE_CASE = client.query(text=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
SCREAMING_SNAKE_CASE = int(factor * num_images )
SCREAMING_SNAKE_CASE = ClipClient(
url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=SCREAMING_SNAKE_CASE_, aesthetic_weight=0.1, )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = tqdm(desc='downloading real regularization images', total=SCREAMING_SNAKE_CASE_ )
with open(f'''{class_data_dir}/caption.txt''', 'w' ) as fa, open(f'''{class_data_dir}/urls.txt''', 'w' ) as fa, open(
f'''{class_data_dir}/images.txt''', 'w' ) as fa:
while total < num_class_images:
SCREAMING_SNAKE_CASE = class_images[count]
count += 1
try:
SCREAMING_SNAKE_CASE = requests.get(images['url'] )
if img.status_code == 2_0_0:
SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''', 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def UpperCamelCase_ ( ):
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('', add_help=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=SCREAMING_SNAKE_CASE_, type=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--class_data_dir', help='path to save images', required=SCREAMING_SNAKE_CASE_, type=SCREAMING_SNAKE_CASE_ )
parser.add_argument('--num_class_images', help='number of images to download', default=2_0_0, type=SCREAMING_SNAKE_CASE_ )
return parser.parse_args()
if __name__ == "__main__":
snake_case = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
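# Example invocation (hypothetical prompt and output directory):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200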
| 406 | 1 |
'''Harris corner detection on a greyscale image.'''
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, usually in [0.04, 0.06]
        window_size: side length of the neighbourhood summed per pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """Return the image with detected corners highlighted and the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: a fixed k is used here rather than self.k (kept from the original code)
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 26 |
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare and swap mirrored pairs, then recurse on both halves."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
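# Example: circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; each pass
# compares mirrored pairs across the "circle" and recurses on both halves until
# a full pass performs no swaps.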
| 303 | 0 |
'''
Project Euler Problem 75: https://projecteuler.net/problem=75

Count the perimeters L <= limit for which exactly one integer-sided right
triangle exists. Primitive triples come from Euclid's formula: for coprime
m > n of opposite parity, the perimeter is 2 * m * (m + n).
'''
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Count the primitive triple and all of its integer multiples.
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
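# Sanity check: the published answer to Project Euler problem 75 is 161667.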
| 711 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 656 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Key renames for the patch-embedding block of stage `idx`."""
    embed = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """Key renames for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Key rename for the classification token of stage `idx`."""
    token = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', "stage2.cls_token") )
return token
def final():
    """Key renames for the final layernorm and classifier head."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint into the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
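# Hedged usage sketch (added for illustration, not part of the original script).
# The script filename and local checkpoint path below are assumptions; the
# argument names match the parser defined above.
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-hf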
| 514 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowerCAmelCase__ = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
A__ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
A__ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
A__ = "<|endoftext|>" if eos_token is None else eos_token
A__ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
A__ = unk_token if pad_token is None else pad_token
A__ = eos_token if bos_token is None else bos_token
else:
A__ = "<pad>" if pad_token is None else pad_token
A__ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# Used for whitespace normalization in input texts
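# NOTE (added): the set literal below originally contained distinct Unicode
# space variants (non-breaking, thin, zero-width, ...) that render here as
# plain spaces or empty strings; the exact characters were lost in extraction.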
# fmt: off
A__ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
A__ = re.compile(
F'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self , lowercase ) -> List[Any]:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
A__ = self.non_printing_characters_re.sub("" , lowercase )
# Normalize whitespaces
A__ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
A__ = unicodedata.normalize("NFC" , lowercase )
return text
def UpperCamelCase ( self , lowercase , **lowercase ) -> List[str]:
'''simple docstring'''
A__ = self.preprocess_text(lowercase )
return self.sp_model.encode(lowercase , out_type=lowercase )
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(lowercase )
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase )
@staticmethod
def UpperCamelCase ( lowercase ) -> str:
'''simple docstring'''
return out_string
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
A__ = []
A__ = ""
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(lowercase )
A__ = False
out_string += self.sp_model.decode(lowercase )
return out_string
def UpperCamelCase ( self ) -> Dict[str, int]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def UpperCamelCase ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(lowercase , lowercase ):
A__ = self.preprocess_text(lowercase )
A__ = self.sp_model.encode(lowercase )
else:
A__ = [self.preprocess_text(lowercase ) for t in text]
A__ = self.sp_model.encode(lowercase )
if return_tensors is True or return_tensors == "pt":
A__ = torch.tensor(lowercase )
return token_ids
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
return self.sp_model.decode(lowercase )
def UpperCamelCase ( self , lowercase ) -> List[int]:
'''simple docstring'''
A__ = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
A__ = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(lowercase ) + F'{self.bos_token}Bot:'
)
return self.encode(text=lowercase )
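# Hedged usage sketch (added for illustration; loading the checkpoint needs
# network access, and the model id is taken from the vocab map above):
#
#   tokenizer = a__.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Exempeltext på svenska")["input_ids"]
#   print(tokenizer.decode(ids))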
| 514 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 152 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
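# Illustrative note (added): with the lazy module installed above, the heavy
# torch-backed symbols are only imported on first attribute access, e.g. the
# (commented) line below resolves through _LazyModule rather than eagerly:
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusConfig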
| 152 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
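# Hedged example (added): with the intended, de-obfuscated signatures
# check_cycle(graph) and depth_first_search(graph, vertex, visited, rec_stk),
# a back edge in the adjacency dict is reported as a cycle:
#
#   assert check_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
#   assert check_cycle({0: [1], 1: [2], 2: []}) is False   # acyclic chain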
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a_ : List[Any] = get_logger(__name__)
class snake_case :
"""simple docstring"""
_lowerCamelCase = "dummy_data"
_lowerCamelCase = "datasets"
_lowerCamelCase = False
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
"""simple docstring"""
lowerCamelCase_ = 0
lowerCamelCase_ = dataset_name
lowerCamelCase_ = cache_dir
lowerCamelCase_ = use_local_dummy_data
lowerCamelCase_ = config
# download_callbacks take a single url as input
lowerCamelCase_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCamelCase_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCamelCase_ = str(UpperCamelCase )
# to be downloaded
lowerCamelCase_ = None
lowerCamelCase_ = None
@property
def snake_case ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowerCamelCase_ = self.download_dummy_data()
return self._dummy_file
@property
def snake_case ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCamelCase_ = cached_path(
UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
return os.path.join(UpperCamelCase , self.dummy_file_name )
@property
def snake_case ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def snake_case ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def snake_case ( self ):
"""simple docstring"""
# return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCamelCase_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCamelCase_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase , UpperCamelCase ):
return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
elif isinstance(UpperCamelCase , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
else:
return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return self.download_and_extract(UpperCamelCase )
def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return path
def snake_case ( self ):
"""simple docstring"""
return {}
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase , UpperCamelCase ):
for single_url in single_urls:
download_callback(UpperCamelCase )
else:
lowerCamelCase_ = single_urls
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
else:
lowerCamelCase_ = single_urls
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
lowerCamelCase_ = value
# make sure that values are unique
if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
lowerCamelCase_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(UpperCamelCase )
return dummy_data_list
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
def _iter_archive_members(UpperCamelCase ):
# this preserves the order of the members inside the ZIP archive
lowerCamelCase_ = Path(self.dummy_file ).parent
lowerCamelCase_ = path.relative_to(UpperCamelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCamelCase_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase )
lowerCamelCase_ = Path(UpperCamelCase )
lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(UpperCamelCase , UpperCamelCase )
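# Hedged note (added): in the un-obfuscated `datasets` source this class is
# MockDownloadManager; a typical (assumed) construction and use look like
#
#   mock_dl = MockDownloadManager("squad", None, "1.0.0")
#   local = mock_dl.download_and_extract("https://example.com/train.json")
#
# where every real URL is remapped to a file inside the local dummy_data.zip.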
| 675 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Union[str, Any]=2_24 , _lowerCAmelCase : List[str]=10_00 , _lowerCAmelCase : Union[str, Any]=[3, 3, 6, 4] , _lowerCAmelCase : Union[str, Any]=[48, 56, 1_12, 2_20] , ):
__snake_case : Optional[int] = parent
__snake_case : Dict = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = is_training
__snake_case : List[str] = use_labels
__snake_case : Any = hidden_dropout_prob
__snake_case : List[str] = attention_probs_dropout_prob
__snake_case : str = num_labels
__snake_case : Optional[Any] = image_size
__snake_case : Union[str, Any] = layer_depths
__snake_case : str = embed_dims
def snake_case__ ( self : Any ):
__snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Dict = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Optional[int] ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1e-5 , )
def snake_case__ ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ):
__snake_case : Tuple = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Any = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ):
__snake_case : str = self.num_labels
__snake_case : Optional[Any] = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__snake_case : List[Any] = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : List[str] ):
((__snake_case) , (__snake_case) , (__snake_case)) : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
A : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
A : Dict = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
A : Union[str, Any] = False
A : List[Any] = False
A : List[Any] = False
A : Optional[Any] = False
A : Dict = False
def snake_case__ ( self : Union[str, Any] ):
__snake_case : int = SwiftFormerModelTester(self )
__snake_case : Tuple = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def snake_case__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def snake_case__ ( self : List[Any] ):
pass
def snake_case__ ( self : Tuple ):
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(_lowerCAmelCase )
__snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def snake_case__ ( self : Dict ):
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(_lowerCAmelCase )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def snake_case__ ( self : int ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case__ ( self : Any ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def snake_case__ ( self : Optional[int] ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def snake_case__ ( self : List[str] ):
pass
def snake_case__ ( self : List[str] ):
def check_hidden_states_output(_lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
__snake_case : List[str] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__snake_case : int = outputs.hidden_states
__snake_case : str = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : int = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
def _config_zero_init(_lowerCAmelCase : List[Any] ):
__snake_case : Union[str, Any] = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1e-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
__snake_case : Any = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case__ ( self : Union[str, Any] ):
pass
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def snake_case__ ( self : List[Any] ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(**_lowerCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__snake_case : Dict = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 390 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "camembert"
def __init__( self : List[Any] , _lowerCAmelCase : Any=3_05_22 , _lowerCAmelCase : str=7_68 , _lowerCAmelCase : Union[str, Any]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : Optional[Any]=30_72 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Any=5_12 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Optional[Any]=1e-12 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[str]="absolute" , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=None , **_lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : List[str] = initializer_range
__snake_case : Dict = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[Any] = use_cache
__snake_case : str = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
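# Hedged usage sketch (added): in the un-obfuscated source the two classes
# above are CamembertConfig and CamembertOnnxConfig; the default-task inputs
# mapping can then be inspected as
#
#   onnx_cfg = CamembertOnnxConfig(CamembertConfig(), task="default")
#   print(onnx_cfg.inputs)  # input_ids / attention_mask with dynamic axes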
| 390 | 1 |
"""simple docstring"""
from collections import deque
def lowercase_ ( __UpperCAmelCase ) -> int:
lowerCAmelCase__ : Optional[int] = len(__UpperCAmelCase )
lowerCAmelCase__ : int = deque()
lowerCAmelCase__ : Optional[int] = [False for _ in range(__UpperCAmelCase )]
lowerCAmelCase__ : Any = [-1 for _ in range(__UpperCAmelCase )]
lowerCAmelCase__ : Any = index_of[:]
def strong_connect(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = index # the number when this node is seen
lowerCAmelCase__ : Tuple = index # lowest rank node reachable from here
index += 1
stack.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = True
for w in g[v]:
if index_of[w] == -1:
lowerCAmelCase__ : List[str] = strong_connect(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCAmelCase__ : List[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[int] = stack.pop()
lowerCAmelCase__ : Tuple = False
component.append(__UpperCAmelCase )
while w != v:
lowerCAmelCase__ : Optional[int] = stack.pop()
lowerCAmelCase__ : Optional[Any] = False
component.append(__UpperCAmelCase )
components.append(__UpperCAmelCase )
return index
lowerCAmelCase__ : Dict = []
for v in range(__UpperCAmelCase ):
if index_of[v] == -1:
strong_connect(__UpperCAmelCase , 0 , __UpperCAmelCase )
return components
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = [[] for _ in range(__UpperCAmelCase )]
for u, v in edges:
g[u].append(__UpperCAmelCase )
return g
if __name__ == "__main__":
# Test
_A = 7
_A = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_A = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_A = [(u, v) for u, v in zip(source, target)]
_A = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 299 |
"""simple docstring"""
from __future__ import annotations
import queue
class _lowerCamelCase :
def __init__( self : Optional[int] , UpperCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = data
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : Optional[int] = None
def lowercase_ ( ) -> TreeNode:
print("""\n********Press N to stop entering at any point of time********\n""" )
lowerCAmelCase__ : Any = input("""Enter the value of the root node: """ ).strip().lower()
lowerCAmelCase__ : queue.Queue = queue.Queue()
lowerCAmelCase__ : Union[str, Any] = TreeNode(int(__UpperCAmelCase ) )
q.put(__UpperCAmelCase )
while not q.empty():
lowerCAmelCase__ : Dict = q.get()
lowerCAmelCase__ : Optional[Any] = f"""Enter the left node of {node_found.data}: """
lowerCAmelCase__ : List[str] = input(__UpperCAmelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCAmelCase__ : Dict = TreeNode(int(__UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = left_node
q.put(__UpperCAmelCase )
lowerCAmelCase__ : str = f"""Enter the right node of {node_found.data}: """
lowerCAmelCase__ : str = input(__UpperCAmelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
lowerCAmelCase__ : List[Any] = TreeNode(int(__UpperCAmelCase ) )
lowerCAmelCase__ : List[str] = right_node
q.put(__UpperCAmelCase )
raise
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
lowerCAmelCase__ : queue.Queue = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
lowerCAmelCase__ : Union[str, Any] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
lowerCAmelCase__ : queue.Queue = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
lowerCAmelCase__ : Tuple = []
while not q.empty():
lowerCAmelCase__ : Optional[int] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCAmelCase )
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
lowerCAmelCase__ : list[TreeNode] = []
lowerCAmelCase__ : List[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCAmelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCAmelCase__ : Optional[Any] = n.right
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
lowerCAmelCase__ : list[TreeNode] = []
lowerCAmelCase__ : List[str] = node
while n or stack:
while n:
stack.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = n.left
lowerCAmelCase__ : Any = stack.pop()
print(n.data , end=""",""" )
lowerCAmelCase__ : Optional[int] = n.right
def lowercase_ ( __UpperCAmelCase ) -> None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
lowerCAmelCase__ , lowerCAmelCase__ : Dict = [], []
lowerCAmelCase__ : List[Any] = node
stacka.append(__UpperCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase__ : Tuple = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def lowercase_ ( __UpperCAmelCase = "" , __UpperCAmelCase=50 , __UpperCAmelCase="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = divmod(width - len(__UpperCAmelCase ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
_A = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 299 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase ( snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[Any] )-> Any:
A_ = multiprocessing.Manager()
A_ = manager.list()
A_ = multiprocessing.Process(target=snake_case__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
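# Hedged usage sketch (added; the de-obfuscated name of the function above is
# check_correctness and the program string is hypothetical):
#
#   prog = "def add(a, b):\n    return a + b\nassert add(1, 2) == 3\n"
#   res = check_correctness(prog, 3.0, "demo/0", 0)  # program, timeout, task_id, completion_id
#   print(res["passed"])  # True if the asserts ran within the time limit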
def lowerCAmelCase ( snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[str] )-> int:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
A_ = shutil.rmtree
A_ = os.rmdir
A_ = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
A_ = {}
with swallow_io():
with time_limit(snake_case__ ):
exec(snake_case__ , snake_case__ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(f'failed: {e}' )
# Needed for cleaning up.
A_ = rmtree
A_ = rmdir
A_ = chdir
@contextlib.contextmanager
def lowerCAmelCase ( snake_case__ : Dict )-> Optional[Any]:
def signal_handler(snake_case__ : Optional[int] , snake_case__ : Dict ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , snake_case__ )
signal.signal(signal.SIGALRM , snake_case__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def lowerCAmelCase ( )-> Optional[int]:
A_ = WriteOnlyStringIO()
with contextlib.redirect_stdout(snake_case__ ):
with contextlib.redirect_stderr(snake_case__ ):
with redirect_stdin(snake_case__ ):
yield
@contextlib.contextmanager
def lowerCAmelCase ( )-> int:
with tempfile.TemporaryDirectory() as dirname:
with chdir(snake_case__ ):
yield dirname
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
pass
class lowerCamelCase ( io.StringIO ):
"""simple docstring"""
def lowercase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
raise OSError
def lowercase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
raise OSError
def lowercase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
raise OSError
def lowercase_ ( self , *__UpperCamelCase , **__UpperCamelCase ):
return False
class lowerCamelCase ( contextlib._RedirectStream ): # type: ignore
"""simple docstring"""
lowerCAmelCase_ = """stdin"""
@contextlib.contextmanager
def lowerCAmelCase ( snake_case__ : Any )-> List[Any]:
if root == ".":
yield
return
A_ = os.getcwd()
os.chdir(snake_case__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(snake_case__ )
def lowerCAmelCase ( snake_case__ : List[Any]=None )-> Optional[Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
A_ = None
A_ = None
import os
A_ = "1"
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
import shutil
A_ = None
A_ = None
A_ = None
import subprocess
A_ = None # type: ignore
A_ = None
import sys
A_ = None
A_ = None
A_ = None
A_ = None
A_ = None
| 608 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCAmelCase_ = Features({"""text""": Value("""string""" )} )
lowerCAmelCase_ = Features({"""labels""": ClassLabel} )
lowerCAmelCase_ = "text"
lowerCAmelCase_ = "labels"
def lowercase_ ( self , __UpperCamelCase ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
A_ = copy.deepcopy(self )
A_ = self.label_schema.copy()
A_ = features[self.label_column]
A_ = label_schema
return task_template
@property
def lowercase_ ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
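# Hedged usage sketch (added): in the un-obfuscated `datasets` source this
# template is TextClassification and the alignment method is
# align_with_features; the feature names below are illustrative:
#
#   features = Features({"text": Value("string"),
#                        "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # copies the concrete ClassLabel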
| 608 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( _A ):
_a : UNetaDModel
_a : ScoreSdeVeScheduler
def __init__( self , a , a ):
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self , a = 1 , a = 2_0_0_0 , a = None , a = "pil" , a = True , **a , ):
snake_case__ : str =self.unet.config.sample_size
snake_case__ : Any =(batch_size, 3, img_size, img_size)
snake_case__ : Optional[Any] =self.unet
snake_case__ : str =randn_tensor(a , generator=a ) * self.scheduler.init_noise_sigma
snake_case__ : int =sample.to(self.device )
self.scheduler.set_timesteps(a )
self.scheduler.set_sigmas(a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
snake_case__ : Optional[Any] =self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
snake_case__ : Tuple =self.unet(a , a ).sample
snake_case__ : Tuple =self.scheduler.step_correct(a , a , generator=a ).prev_sample
# prediction step
snake_case__ : List[Any] =model(a , a ).sample
snake_case__ : Optional[Any] =self.scheduler.step_pred(a , a , a , generator=a )
snake_case__ , snake_case__ : Optional[Any] =output.prev_sample, output.prev_sample_mean
snake_case__ : Optional[Any] =sample_mean.clamp(0 , 1 )
snake_case__ : Tuple =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case__ : Union[str, Any] =self.numpy_to_pil(a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=a )
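# Hedged usage sketch (added): the checkpoint id is assumed to host a
# score-SDE-VE model on the Hub, and the keyword names are the un-obfuscated
# ones (batch_size, num_inference_steps):
#
#   pipe = _lowercase.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(batch_size=1, num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")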
| 385 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
_A , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowercase ( _A ):
def lowercase__ ( self , a ):
if self.framework == "tf":
snake_case__ : int =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case__ : Optional[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def lowercase__ ( self , a ):
snake_case__ : str =self.get_masked_index(a )
snake_case__ : Any =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def lowercase__ ( self , a ):
if isinstance(a , a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(a )
def lowercase__ ( self , a , a=None , **a ):
if return_tensors is None:
snake_case__ : Optional[Any] =self.framework
snake_case__ : List[str] =self.tokenizer(a , return_tensors=a )
self.ensure_exactly_one_mask_token(a )
return model_inputs
def lowercase__ ( self , a ):
snake_case__ : Optional[Any] =self.model(**a )
snake_case__ : str =model_inputs["""input_ids"""]
return model_outputs
def lowercase__ ( self , a , a=5 , a=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case__ : Union[str, Any] =target_ids.shape[0]
snake_case__ : Union[str, Any] =model_outputs["""input_ids"""][0]
snake_case__ : List[Any] =model_outputs["""logits"""]
if self.framework == "tf":
snake_case__ : str =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case__ : Any =outputs.numpy()
snake_case__ : Optional[Any] =outputs[0, masked_index, :]
snake_case__ : List[Any] =stable_softmax(a , axis=-1 )
if target_ids is not None:
snake_case__ : str =tf.gather_nd(tf.squeeze(a , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case__ : List[str] =tf.expand_dims(a , 0 )
snake_case__ : Optional[Any] =tf.math.top_k(a , k=a )
snake_case__ , snake_case__ : int =topk.values.numpy(), topk.indices.numpy()
else:
snake_case__ : List[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case__ : int =outputs[0, masked_index, :]
snake_case__ : Optional[int] =logits.softmax(dim=-1 )
if target_ids is not None:
snake_case__ : Dict =probs[..., target_ids]
snake_case__ , snake_case__ : List[Any] =probs.topk(a )
snake_case__ : List[Any] =[]
snake_case__ : int =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case__ : Dict =[]
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case__ : List[Any] =input_ids.numpy().copy()
if target_ids is not None:
snake_case__ : Tuple =target_ids[p].tolist()
snake_case__ : Any =p
# Filter padding out:
snake_case__ : int =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case__ : Union[str, Any] =self.tokenizer.decode(a , skip_special_tokens=a )
snake_case__ : int ={"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(a )
result.append(a )
if single_mask:
return result[0]
return result
def lowercase__ ( self , a , a=None ):
if isinstance(a , a ):
snake_case__ : Tuple =[targets]
try:
snake_case__ : Any =self.tokenizer.get_vocab()
except Exception:
snake_case__ : List[Any] ={}
snake_case__ : Any =[]
for target in targets:
snake_case__ : Optional[int] =vocab.get(a , a )
if id_ is None:
snake_case__ : str =self.tokenizer(
a , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , max_length=1 , truncation=a , )["""input_ids"""]
if len(a ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case__ : Any =input_ids[0]
# XXX: If users hit this path it becomes pretty slow, so let's make sure
# the warning enables them to fix the input and get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
snake_case__ : Optional[Any] =list(set(a ) )
if len(a ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case__ : Tuple =np.array(a )
return target_ids
def lowercase__ ( self , a=None , a=None ):
snake_case__ : int ={}
if targets is not None:
snake_case__ : str =self.get_target_ids(a , a )
snake_case__ : Union[str, Any] =target_ids
if top_k is not None:
snake_case__ : Dict =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , a , *a , **a ):
snake_case__ : List[Any] =super().__call__(a , **a )
if isinstance(a , a ) and len(a ) == 1:
return outputs[0]
return outputs
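# Hedged usage sketch (added for illustration): this exercises the public
# pipeline factory rather than the local class, so the registered fill-mask
# pipeline is used; downloading "bert-base-uncased" requires network access.
if __name__ == "__main__":
    from transformers import pipeline

    fill_mask = pipeline("fill-mask", model="bert-base-uncased")
    print(fill_mask("Paris is the [MASK] of France.", top_k=2))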
| 385 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        # DPR wraps a BERT encoder, so its config is built on top of the BertConfig.
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        # The reader predicts per-token start/end logits plus one relevance logit per passage.
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
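# Note: TFDPRModelTester is a plain helper rather than a TestCase; the mixin-based
# test class below instantiates it in setUp() and drives its prepare/create_and_check
# methods from the individual test_* methods, the usual transformers testing pattern.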
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
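# A hypothetical invocation sketch (the path assumes the usual transformers checkout
# layout; adjust it to wherever this test file actually lives):
#
#   python -m pytest tests/models/dpr/test_modeling_tf_dpr.py -k "TFDPRModelTest" -q
#   RUN_SLOW=1 python -m pytest tests/models/dpr/test_modeling_tf_dpr.py -q
#
# RUN_SLOW=1 is needed because @slow-decorated tests are skipped by default.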
| 219 | 0 |