code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads' ) )
class lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , snake_case : str , snake_case : List[str]=13 , snake_case : List[Any]=32 , snake_case : Tuple=2 , snake_case : Union[str, Any]=3 , snake_case : List[str]=640 , snake_case : List[Any]=4 , snake_case : List[Any]="silu" , snake_case : int=3 , snake_case : Tuple=32 , snake_case : List[str]=0.1 , snake_case : List[Any]=0.1 , snake_case : int=0.1 , snake_case : Optional[Any]=0.02 , snake_case : List[str]=True , snake_case : Union[str, Any]=True , snake_case : int=10 , snake_case : List[str]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = last_hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Any = conv_kernel_size
SCREAMING_SNAKE_CASE : Any = output_stride
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = scope
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : str , snake_case : List[Any] , snake_case : List[Any] , snake_case : List[str] , snake_case : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self : Any , snake_case : Optional[int] , snake_case : int , snake_case : Any , snake_case : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = MobileViTForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict , snake_case : List[str] , snake_case : Any , snake_case : Dict , snake_case : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE : Optional[int] = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : List[str] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase : Any = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : int = False
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(snake_case )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(snake_case : Tuple , snake_case : str , snake_case : int ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[Any] = 5
self.assertEqual(len(snake_case ) , snake_case )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : Optional[int] = 2
for i in range(len(snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = MobileViTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __a ( ) -> int:
SCREAMING_SNAKE_CASE : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(snake_case )
SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**snake_case )
# verify the logits
SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[Any] = model.to(snake_case )
SCREAMING_SNAKE_CASE : List[str] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**snake_case )
SCREAMING_SNAKE_CASE : str = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : int = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , snake_case )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(snake_case )
SCREAMING_SNAKE_CASE : int = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**snake_case )
SCREAMING_SNAKE_CASE : Any = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , snake_case )
SCREAMING_SNAKE_CASE : str = image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE : Dict = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , snake_case ) | 352 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_lowerCamelCase : Union[str, Any] = tuple[int, int]
class lowercase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : int , snake_case : int , snake_case : int , snake_case : int , snake_case : int , snake_case : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = pos_x
SCREAMING_SNAKE_CASE : str = pos_y
SCREAMING_SNAKE_CASE : Any = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : List[str] = goal_x
SCREAMING_SNAKE_CASE : Optional[Any] = goal_y
SCREAMING_SNAKE_CASE : int = g_cost
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[Any] = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : str = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : str = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(snake_case ) + abs(snake_case )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Any , snake_case : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class lowercase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : TPosition , snake_case : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case )
SCREAMING_SNAKE_CASE : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , snake_case )
SCREAMING_SNAKE_CASE : Any = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : int = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(snake_case )
self.closed_nodes.append(snake_case )
SCREAMING_SNAKE_CASE : int = self.get_successors(snake_case )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(snake_case ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case )
else:
self.open_nodes.append(snake_case )
return [self.start.pos]
def lowerCamelCase_ ( self : str , snake_case : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = []
for action in delta:
SCREAMING_SNAKE_CASE : Optional[Any] = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case , snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case , ) )
return successors
def lowerCamelCase_ ( self : Optional[int] , snake_case : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = node
SCREAMING_SNAKE_CASE : List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Dict = current_node.parent
path.reverse()
return path
class lowercase :
'''simple docstring'''
def __init__( self : str , snake_case : TPosition , snake_case : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AStar(snake_case , snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = AStar(snake_case , snake_case )
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : Dict = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
snake_case , snake_case )
self.fwd_astar.closed_nodes.append(snake_case )
self.bwd_astar.closed_nodes.append(snake_case )
SCREAMING_SNAKE_CASE : str = current_bwd_node
SCREAMING_SNAKE_CASE : int = current_fwd_node
SCREAMING_SNAKE_CASE : str = {
self.fwd_astar: self.fwd_astar.get_successors(snake_case ),
self.bwd_astar: self.bwd_astar.get_successors(snake_case ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(snake_case )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(snake_case ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(snake_case )
else:
astar.open_nodes.append(snake_case )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , snake_case : Node , snake_case : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.fwd_astar.retrace_path(snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.retrace_path(snake_case )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : Union[str, Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_lowerCamelCase : Tuple = (0, 0)
_lowerCamelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_lowerCamelCase : Union[str, Any] = time.time()
_lowerCamelCase : List[str] = AStar(init, goal)
_lowerCamelCase : Optional[int] = a_star.search()
_lowerCamelCase : str = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_lowerCamelCase : Optional[Any] = time.time()
_lowerCamelCase : Union[str, Any] = BidirectionalAStar(init, goal)
_lowerCamelCase : List[Any] = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""") | 352 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ["DeiTFeatureExtractor"]
_lowerCAmelCase : Union[str, Any] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : Union[str, Any] = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
"""simple docstring"""
def __init__( self , snake_case , snake_case=12 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=0.02 , snake_case=0 , snake_case=None , ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : Union[str, Any] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : List[Any] = use_input_mask
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : Union[str, Any] = projection_dim
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : int = dropout
lowerCAmelCase__ : List[Any] = attention_dropout
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Optional[Any] = scope
lowerCAmelCase__ : List[Any] = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCAmelCase__ : str = input_mask.numpy()
lowerCAmelCase__ , lowerCAmelCase__ : int = input_mask.shape
lowerCAmelCase__ : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case ):
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self , snake_case , snake_case , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = TFBlipTextModel(config=snake_case )
lowerCAmelCase__ : int = model(snake_case , attention_mask=snake_case , training=snake_case )
lowerCAmelCase__ : int = model(snake_case , training=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = config_and_inputs
lowerCAmelCase__ : Any = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __a ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = (TFBlipTextModel,) if is_tf_available() else ()
__UpperCamelCase : str = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = BlipTextModelTester(self )
lowerCAmelCase__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = TFBlipTextModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def SCREAMING_SNAKE_CASE_ ( self , snake_case=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case )
| 453 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> list:
lowerCAmelCase__ : List[str] = len(lowercase__ )
lowerCAmelCase__ : Dict = []
for i in range(len(lowercase__ ) - pat_len + 1 ):
lowerCAmelCase__ : Union[str, Any] = True
for j in range(lowercase__ ):
if s[i + j] != pattern[j]:
lowerCAmelCase__ : int = False
break
if match_found:
position.append(lowercase__ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 453 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class a ( lowercase_ ):
"""simple docstring"""
__lowerCAmelCase = '''cvt'''
def __init__( self , snake_case_=3 , snake_case_=[7, 3, 3] , snake_case_=[4, 2, 2] , snake_case_=[2, 1, 1] , snake_case_=[64, 192, 384] , snake_case_=[1, 3, 6] , snake_case_=[1, 2, 10] , snake_case_=[4.0, 4.0, 4.0] , snake_case_=[0.0, 0.0, 0.0] , snake_case_=[0.0, 0.0, 0.0] , snake_case_=[0.0, 0.0, 0.1] , snake_case_=[True, True, True] , snake_case_=[False, False, True] , snake_case_=["dw_bn", "dw_bn", "dw_bn"] , snake_case_=[3, 3, 3] , snake_case_=[1, 1, 1] , snake_case_=[2, 2, 2] , snake_case_=[1, 1, 1] , snake_case_=[1, 1, 1] , snake_case_=0.0_2 , snake_case_=1e-1_2 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**snake_case_ )
__UpperCAmelCase: Union[str, Any] = num_channels
__UpperCAmelCase: Union[str, Any] = patch_sizes
__UpperCAmelCase: Dict = patch_stride
__UpperCAmelCase: Any = patch_padding
__UpperCAmelCase: Any = embed_dim
__UpperCAmelCase: int = num_heads
__UpperCAmelCase: List[str] = depth
__UpperCAmelCase: List[Any] = mlp_ratio
__UpperCAmelCase: Optional[int] = attention_drop_rate
__UpperCAmelCase: List[Any] = drop_rate
__UpperCAmelCase: Any = drop_path_rate
__UpperCAmelCase: List[str] = qkv_bias
__UpperCAmelCase: Optional[Any] = cls_token
__UpperCAmelCase: Optional[int] = qkv_projection_method
__UpperCAmelCase: List[str] = kernel_qkv
__UpperCAmelCase: Any = padding_kv
__UpperCAmelCase: Union[str, Any] = stride_kv
__UpperCAmelCase: Optional[Any] = padding_q
__UpperCAmelCase: Optional[Any] = stride_q
__UpperCAmelCase: Union[str, Any] = initializer_range
__UpperCAmelCase: Union[str, Any] = layer_norm_eps | 720 | '''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class a :
"""simple docstring"""
def __init__( self , snake_case_ = None ):
'''simple docstring'''
if components is None:
__UpperCAmelCase: List[Any] = []
__UpperCAmelCase: Tuple = list(snake_case_ )
def __len__( self ):
'''simple docstring'''
return len(self.__components )
def __str__( self ):
'''simple docstring'''
return "(" + ",".join(map(snake_case_ , self.__components ) ) + ")"
def __add__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: int = len(self )
if size == len(snake_case_ ):
__UpperCAmelCase: str = [self.__components[i] + other.component(snake_case_ ) for i in range(snake_case_ )]
return Vector(snake_case_ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Dict = len(self )
if size == len(snake_case_ ):
__UpperCAmelCase: Dict = [self.__components[i] - other.component(snake_case_ ) for i in range(snake_case_ )]
return Vector(snake_case_ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
def __mul__( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , (float, int) ):
__UpperCAmelCase: str = [c * other for c in self.__components]
return Vector(snake_case_ )
elif isinstance(snake_case_ , snake_case_ ) and len(self ) == len(snake_case_ ):
__UpperCAmelCase: Dict = len(self )
__UpperCAmelCase: List[str] = [self.__components[i] * other.component(snake_case_ ) for i in range(snake_case_ )]
return sum(snake_case_ )
else: # error case
raise Exception("""invalid operand!""" )
def lowercase_ ( self ):
'''simple docstring'''
return Vector(self.__components )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCAmelCase: List[str] = value
def lowercase_ ( self ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__UpperCAmelCase: Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(snake_case_ ) )
def lowercase_ ( self , snake_case_ , snake_case_ = False ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = self * other
__UpperCAmelCase: Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCamelCase__ ( _lowercase : int ) -> Vector:
assert isinstance(_lowercase , _lowercase )
return Vector([0] * dimension )
def UpperCamelCase__ ( _lowercase : int , _lowercase : int ) -> Vector:
assert isinstance(_lowercase , _lowercase ) and (isinstance(_lowercase , _lowercase ))
__UpperCAmelCase: int = [0] * dimension
__UpperCAmelCase: int = 1
return Vector(_lowercase )
def UpperCamelCase__ ( _lowercase : float , _lowercase : Vector , _lowercase : Vector ) -> Vector:
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and (isinstance(_lowercase , (int, float) ))
)
return x * scalar + y
def UpperCamelCase__ ( _lowercase : int , _lowercase : int , _lowercase : int ) -> Vector:
random.seed(_lowercase )
__UpperCAmelCase: str = [random.randint(_lowercase , _lowercase ) for _ in range(_lowercase )]
return Vector(_lowercase )
class a :
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[str] = matrix
__UpperCAmelCase: Dict = w
__UpperCAmelCase: Tuple = h
def __str__( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , snake_case_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase: List[str] = []
for i in range(self.__height ):
__UpperCAmelCase: List[Any] = [
self.__matrix[i][j] + other.component(snake_case_ , snake_case_ )
for j in range(self.__width )
]
matrix.append(snake_case_ )
return Matrix(snake_case_ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , snake_case_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase: int = []
for i in range(self.__height ):
__UpperCAmelCase: Any = [
self.__matrix[i][j] - other.component(snake_case_ , snake_case_ )
for j in range(self.__width )
]
matrix.append(snake_case_ )
return Matrix(snake_case_ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
@overload
def __mul__( self , snake_case_ ):
'''simple docstring'''
...
def __mul__( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ): # matrix-vector
if len(snake_case_ ) == self.__width:
__UpperCAmelCase: Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__UpperCAmelCase: Dict = [
self.__matrix[i][j] * other.component(snake_case_ )
for j in range(self.__width )
]
ans.change_component(snake_case_ , sum(snake_case_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(snake_case_ , (int, float) ): # matrix-scalar
__UpperCAmelCase: Dict = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(snake_case_ , self.__width , self.__height )
return None
def lowercase_ ( self ):
'''simple docstring'''
return self.__height
def lowercase_ ( self ):
'''simple docstring'''
return self.__width
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCAmelCase: int = value
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__UpperCAmelCase: int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(snake_case_ ) ):
__UpperCAmelCase: Optional[Any] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(snake_case_ , self.__width - 1 , self.__height - 1 ).determinant()
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(snake_case_ , snake_case_ )
else:
raise Exception("""Indices out of bounds""" )
def lowercase_ ( self ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCAmelCase: Any = [
self.__matrix[0][y] * self.cofactor(0 , snake_case_ ) for y in range(self.__width )
]
return sum(snake_case_ )
def UpperCamelCase__ ( _lowercase : int ) -> Matrix:
__UpperCAmelCase: list[list[float]] = [[0] * n for _ in range(_lowercase )]
return Matrix(_lowercase , _lowercase , _lowercase )
def UpperCamelCase__ ( _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : int ) -> Matrix:
random.seed(_lowercase )
__UpperCAmelCase: list[list[float]] = [
[random.randint(_lowercase , _lowercase ) for _ in range(_lowercase )] for _ in range(_lowercase )
]
return Matrix(_lowercase , _lowercase , _lowercase ) | 466 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
lowerCamelCase : Dict =logging.get_logger('''transformers.models.speecht5''')
lowerCamelCase : Any ={
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
lowerCamelCase : Union[str, Any] ={
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
lowerCamelCase : int ={
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
lowerCamelCase : List[str] ={
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
lowerCamelCase : List[str] ={
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
lowerCamelCase : Optional[int] ={
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
lowerCamelCase : List[str] ={
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
lowerCamelCase : Any ={
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
lowerCamelCase : int ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowerCamelCase : Any ={
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCamelCase : Optional[int] ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowerCamelCase : Any =[]
lowerCamelCase : Dict =[
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
lowerCamelCase : Any =IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
lowerCamelCase : Tuple =IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
lowerCamelCase : Any =IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
for attribute in key.split("." ):
UpperCamelCase__ : Optional[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
UpperCamelCase__ : Optional[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
UpperCamelCase__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
UpperCamelCase__ : Optional[int] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Optional[int] = value
elif weight_type == "running_mean":
UpperCamelCase__ : str = value
elif weight_type == "running_var":
UpperCamelCase__ : Tuple = value
elif weight_type == "num_batches_tracked":
UpperCamelCase__ : str = value
else:
UpperCamelCase__ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCamelCase__ , UpperCamelCase__ : Optional[int] = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
UpperCamelCase__ : Dict = []
if task == "s2t":
UpperCamelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCamelCase__ : Optional[Any] = MAPPING_S2T
UpperCamelCase__ : List[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : int = MAPPING_T2S
UpperCamelCase__ : int = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCamelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCamelCase__ : Tuple = MAPPING_S2S
UpperCamelCase__ : List[Any] = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(__lowerCAmelCase , __lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
UpperCamelCase__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase__ : int = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = key.split(".*." )
if prefix in name and suffix in name:
UpperCamelCase__ : Optional[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCamelCase__ : List[str] = True
if "*" in mapped_key:
UpperCamelCase__ : Optional[int] = name.split(__lowerCAmelCase )[0].split("." )[-2]
UpperCamelCase__ : str = mapped_key.replace("*" , __lowerCAmelCase )
if "weight_g" in name:
UpperCamelCase__ : List[str] = "weight_g"
elif "weight_v" in name:
UpperCamelCase__ : Dict = "weight_v"
elif "bias" in name:
UpperCamelCase__ : List[Any] = "bias"
elif "weight" in name:
UpperCamelCase__ : List[Any] = "weight"
elif "running_mean" in name:
UpperCamelCase__ : Optional[int] = "running_mean"
elif "running_var" in name:
UpperCamelCase__ : Dict = "running_var"
elif "num_batches_tracked" in name:
UpperCamelCase__ : Dict = "num_batches_tracked"
else:
UpperCamelCase__ : Optional[Any] = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase__ : Union[str, Any] = full_name.split("conv_layers." )[-1]
UpperCamelCase__ : Any = name.split("." )
UpperCamelCase__ : List[str] = int(items[0] )
UpperCamelCase__ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
UpperCamelCase__ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
UpperCamelCase__ : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Dict:
if config_path is not None:
UpperCamelCase__ : List[Any] = SpeechTaConfig.from_pretrained(__lowerCAmelCase )
else:
UpperCamelCase__ : List[Any] = SpeechTaConfig()
if task == "s2t":
UpperCamelCase__ : List[str] = config.max_text_positions
UpperCamelCase__ : Optional[int] = SpeechTaForSpeechToText(__lowerCAmelCase )
elif task == "t2s":
UpperCamelCase__ : Optional[int] = 1876
UpperCamelCase__ : int = 600
UpperCamelCase__ : Any = config.max_speech_positions
UpperCamelCase__ : Union[str, Any] = SpeechTaForTextToSpeech(__lowerCAmelCase )
elif task == "s2s":
UpperCamelCase__ : List[Any] = 1876
UpperCamelCase__ : Optional[int] = config.max_speech_positions
UpperCamelCase__ : Union[str, Any] = SpeechTaForSpeechToSpeech(__lowerCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
UpperCamelCase__ : Optional[int] = SpeechTaTokenizer(__lowerCAmelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken("<mask>" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
UpperCamelCase__ : Optional[Any] = SpeechTaFeatureExtractor()
UpperCamelCase__ : int = SpeechTaProcessor(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
UpperCamelCase__ : Tuple = torch.load(__lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint["model"] , __lowerCAmelCase , __lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : Tuple =argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase : Optional[int] =parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 228 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a ( A__ ):
_lowerCAmelCase : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline | 228 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ('''foo.json''',)] )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase ,config_name=__lowerCAmelCase )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(__lowerCAmelCase ,config_name=__lowerCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,__lowerCAmelCase )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,50 )
self.assertEqual(loaded_config.max_length ,20 )
self.assertEqual(loaded_config.max_time ,__lowerCAmelCase )
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(__lowerCAmelCase )
lowerCAmelCase__ : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase ,__lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : int = GenerationConfig()
lowerCAmelCase__ : Union[str, Any] = {
'''max_new_tokens''': 10_24,
'''foo''': '''bar''',
}
lowerCAmelCase__ : Tuple = copy.deepcopy(__lowerCAmelCase )
lowerCAmelCase__ : List[Any] = generation_config.update(**__lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase ,{'''foo''': '''bar'''} )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = GenerationConfig()
lowerCAmelCase__ : List[str] = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase )
lowerCAmelCase__ : Optional[Any] = GenerationConfig.from_pretrained(__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo ,'''bar''' )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_model_config(__lowerCAmelCase )
assert not hasattr(__lowerCAmelCase ,'''foo''' ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,__lowerCAmelCase )
self.assertEqual(default_config.num_beams ,1 )
lowerCAmelCase__ : int = GenerationConfig(
do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,__lowerCAmelCase )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
lowerCAmelCase__ : str = GenerationConfig.from_pretrained(__lowerCAmelCase ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,__lowerCAmelCase )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ (cls ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def lowerCAmelCase__ (cls ) -> Optional[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token ,repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : str = GenerationConfig(
do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''test-generation-config''' ,use_auth_token=self._token )
lowerCAmelCase__ : Optional[Any] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase ,repo_id='''test-generation-config''' ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = GenerationConfig(
do_sample=__lowerCAmelCase ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('''valid_org/test-generation-config-org''' ,use_auth_token=self._token )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase ,repo_id='''valid_org/test-generation-config-org''' ,push_to_hub=__lowerCAmelCase ,use_auth_token=self._token )
lowerCAmelCase__ : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase ,getattr(__lowerCAmelCase ,__lowerCAmelCase ) )
| 707 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__snake_case : Optional[int] =(3, 9, -1_1, 0, 7, 5, 1, -1)
__snake_case : str =(4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
snake_case_ =42
snake_case_ =42
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Node | None = None
for i in sorted(__lowerCamelCase ,reverse=__lowerCamelCase ):
lowerCAmelCase__ : Dict = Node(__lowerCamelCase ,self.head )
def __iter__(self ) -> Iterator[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.head
while node:
yield node.data
lowerCAmelCase__ : Dict = node.next_node
def __len__(self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self ) -> str:
"""simple docstring"""
return " -> ".join([str(__lowerCamelCase ) for node in self] )
def lowerCAmelCase__ ( lowerCamelCase_ : SortedLinkedList ,lowerCamelCase_ : SortedLinkedList):
'''simple docstring'''
return SortedLinkedList(list(lowerCamelCase_) + list(lowerCamelCase_))
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Any =SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 90 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : float, A_ : float, A_ : float, ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE :Union[str, Any] = parse(importlib.metadata.version('''torch'''))
def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Version] , lowerCAmelCase_ :str , lowerCAmelCase_ :str )->Any:
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
snake_case_ = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
snake_case_ = parse(importlib.metadata.version(lowerCAmelCase_ ) )
return operation(lowerCAmelCase_ , parse(lowerCAmelCase_ ) )
def _lowerCAmelCase ( lowerCAmelCase_ :str , lowerCAmelCase_ :str )->Any:
'''simple docstring'''
return compare_versions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
| 283 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE( snake_case_ : List[str] ) ->int:
'''simple docstring'''
_lowercase : List[Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
_lowercase : Optional[Any] = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
_lowercase : Dict = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowercase : Any = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_lowercase : Dict = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(snake_case_ )-1}" )
if "norm" in key:
_lowercase : List[Any] = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowercase : Dict = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
_lowercase : Optional[Any] = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(snake_case_ )-1}" )
if "layer_norm1" in key:
_lowercase : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_lowercase : Optional[int] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_lowercase : str = key[key.find('''block''' ) + len('''block''' )]
_lowercase : Optional[int] = key.replace(F"block{idx}" , F"block.{int(snake_case_ )-1}" )
if "attn.q" in key:
_lowercase : Optional[int] = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_lowercase : Dict = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_lowercase : str = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_lowercase : Union[str, Any] = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_lowercase : List[str] = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_lowercase : int = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_lowercase : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_lowercase : Tuple = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowercase : Optional[Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_lowercase : int = key.replace(F"linear_c{idx}" , F"linear_c.{int(snake_case_ )-1}" )
if "bot_conv" in key:
_lowercase : int = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
_lowercase : Optional[Any] = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
_lowercase : List[Any] = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
_lowercase : List[str] = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
_lowercase : Dict = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
_lowercase : int = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
_lowercase : Union[str, Any] = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
_lowercase : Tuple = key.replace('''module.last_layer_depth''' , '''head.head''' )
_lowercase : List[str] = value
return new_state_dict
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowercase : Optional[Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
_lowercase : Tuple = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
_lowercase : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
_lowercase : Any = kv_bias[: config.hidden_sizes[i]]
_lowercase : Any = kv_weight[
config.hidden_sizes[i] :, :
]
_lowercase : Any = kv_bias[config.hidden_sizes[i] :]
def _SCREAMING_SNAKE_CASE( ) ->Dict:
'''simple docstring'''
_lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowercase : List[str] = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return image
@torch.no_grad()
def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Any=False , snake_case_ : Optional[int]=None ) ->str:
'''simple docstring'''
_lowercase : Dict = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_lowercase : List[str] = GLPNImageProcessor()
# prepare image
_lowercase : int = prepare_img()
_lowercase : Any = image_processor(images=snake_case_ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
_lowercase : str = torch.load(snake_case_ , map_location=torch.device('''cpu''' ) )
# rename keys
_lowercase : str = rename_keys(snake_case_ )
# key and value matrices need special treatment
read_in_k_v(snake_case_ , snake_case_ )
# create HuggingFace model and load state dict
_lowercase : Tuple = GLPNForDepthEstimation(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
# forward pass
_lowercase : str = model(snake_case_ )
_lowercase : str = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_lowercase : List[str] = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
_lowercase : Optional[Any] = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
_lowercase : int = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , snake_case_ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case_ , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
lowerCamelCase__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 709 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Optional[int] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : str , *UpperCamelCase_ : int , **UpperCamelCase_ : int ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[str] , *UpperCamelCase_ : str , **UpperCamelCase_ : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Any , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Optional[int] , *UpperCamelCase_ : str , **UpperCamelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Optional[int] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : Any , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Tuple , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : str , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Dict , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Optional[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : List[str] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Any , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : List[str] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[str] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Optional[int] , *UpperCamelCase_ : int , **UpperCamelCase_ : int ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Optional[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int] ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[str] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : int , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : int , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : List[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : int , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : str , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Tuple , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class _lowerCAmelCase ( metaclass=__A ):
'''simple docstring'''
snake_case_ = ['flax']
def __init__( self : Dict , *UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def __lowercase ( cls : Dict , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def __lowercase ( cls : Optional[int] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
| 411 | 0 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
lowercase = 4
lowercase = (1 << p) - 1
for _ in range(p - 2 ):
lowercase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 84 |
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Tuple =1
for i in range(1, num + 1 ):
fact *= i
return fact
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
A__ : Optional[Any] =0
while number > 0:
A__ : List[Any] =number % 1_0
sum_of_digits += last_digit
A__ : str =number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def SCREAMING_SNAKE_CASE__ ( snake_case_ = 1_0_0 ) -> int:
A__ : List[str] =factorial(snake_case_ )
A__ : str =split_and_add(snake_case_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 416 | 0 |
lowercase_: Optional[Any] = {str(digit): digit**5 for digit in range(10)}
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCamelCase))
def _lowercase ( ):
"""simple docstring"""
return sum(
number
for number in range(1_000 , 1_000_000)
if number == digits_fifth_powers_sum(_lowerCamelCase))
if __name__ == "__main__":
print(solution())
| 716 |
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Any = 1
snake_case__ : Dict = 2
while i * i <= n:
snake_case__ : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _lowercase ( ):
"""simple docstring"""
snake_case__ : int = 1
snake_case__ : str = 1
while True:
i += 1
t_num += i
if count_divisors(UpperCAmelCase_) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 127 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def a ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=A__ )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class lowercase :
def __init__( self : Tuple , _lowercase : List[str] , _lowercase : Union[str, Any]=13 , _lowercase : Optional[Any]=7 , _lowercase : int=True , _lowercase : str=False , _lowercase : Tuple=99 , _lowercase : Dict=16 , _lowercase : List[str]=2 , _lowercase : Optional[int]=4 , _lowercase : Any=4 , _lowercase : Any="relu" , _lowercase : List[str]=0.1 , _lowercase : Any=0.1 , _lowercase : List[str]=0.0 , _lowercase : Tuple=0.0 , _lowercase : Optional[int]=20 , _lowercase : Any=2 , _lowercase : Union[str, Any]=1 , _lowercase : int=0 , ):
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : Any = bos_token_id
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Any = self.get_config()
SCREAMING_SNAKE_CASE__ : Tuple = prepare_mam_aaa_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def lowercase__ ( self : Optional[int] ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : Tuple , _lowercase : str , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = MaMaaaModel(config=_lowercase ).get_decoder().to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict['''attention_mask''']
SCREAMING_SNAKE_CASE__ : Tuple = inputs_dict['''head_mask''']
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , attention_mask=_lowercase , head_mask=_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase , attention_mask=_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[
'''last_hidden_state'''
]
# select random slice
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-2 ) )
def lowercase__ ( self : Any , _lowercase : Dict , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaMaaaModel(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : str = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : List[str] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Any = model.get_encoder()
encoder.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = MaMaaaEncoder.from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = model.get_decoder()
decoder.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : int = MaMaaaDecoder.from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=_lowercase , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCamelCase : str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase : Dict = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[str] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
def lowercase__ ( self : List[str] , _lowercase : Tuple , _lowercase : Any , _lowercase : Tuple , _lowercase : str , _lowercase : List[Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = model_class.from_pretrained(_lowercase , output_loading_info=_lowercase )
self.assertEqual(info['''missing_keys'''] , [] )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = copy.deepcopy(self._prepare_for_class(_lowercase , _lowercase ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : int = inputs['''input_ids''']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE__ : int = inputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs.get('''decoder_input_ids''' , _lowercase )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , _lowercase )
SCREAMING_SNAKE_CASE__ : str = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : List[str] = wte(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = wte(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = wte(_lowercase )
with torch.no_grad():
model(**_lowercase )[0]
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : List[Any] = input_ids.ne(1 ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaMaaaForConditionalGeneration(_lowercase ).eval().to(_lowercase )
if torch_device == "cuda":
model.half()
model.generate(_lowercase , attention_mask=_lowercase )
model.generate(num_beams=4 , do_sample=_lowercase , early_stopping=_lowercase , num_return_sequences=3 )
def a ( A__ ) -> List[str]:
'''simple docstring'''
return torch.tensor(A__ , dtype=torch.long , device=A__ )
a_ :int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : List[Any] ):
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : List[str] = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ : Tuple = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(model.config , _lowercase , _lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
# change to intended input
SCREAMING_SNAKE_CASE__ : Any = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ : List[str] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , _lowercase , _lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
SCREAMING_SNAKE_CASE__ : int = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(
input_ids=dct['''input_ids'''].to(_lowercase ) , attention_mask=dct['''attention_mask'''].to(_lowercase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_lowercase , skip_special_tokens=_lowercase )
assert generated == expected_en
| 35 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
__A = "dandelin/vilt-b32-finetuned-vqa"
__A = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__A = "image_qa"
__A = AutoProcessor
__A = AutoModelForVisualQuestionAnswering
__A = ["image", "text"]
__A = ["text"]
def __init__( self : List[Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : str ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
def a ( self : int , __lowerCAmelCase : "Image" , __lowerCAmelCase : str ):
"""simple docstring"""
return self.pre_processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors='pt' )
def a ( self : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
with torch.no_grad():
return self.model(**__lowerCAmelCase ).logits
def a ( self : Tuple , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCAmelCase = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 309 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 / 255 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=True , ) -> Dict:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_pad
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Dict:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase , UpperCamelCase = image.size
else:
UpperCamelCase , UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["shortest_edge"] * h / w )
UpperCamelCase = self.size["shortest_edge"]
elif w > h:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase = self.size["shortest_edge"]
UpperCamelCase = self.size["shortest_edge"]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowercase = DetrImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = DetrImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_rescale" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "rescale_factor" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_pad" ) )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
# prepare image and target
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"image_id": 39769, "annotations": target}
# encode them
UpperCamelCase = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
UpperCamelCase = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image, target and masks_path
UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
UpperCamelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
UpperCamelCase = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
| 414 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __magic_name__ ( lowercase_ = "isbn/0140328726" ) -> dict:
'''simple docstring'''
UpperCamelCase = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
UpperCamelCase = f'''{olid} is not a valid Open Library olid'''
raise ValueError(lowercase_ )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def __magic_name__ ( lowercase_ ) -> dict:
'''simple docstring'''
UpperCamelCase = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
UpperCamelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCamelCase = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
UpperCamelCase = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
UpperCamelCase = ", ".join(lowercase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__a : Optional[Any] = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__a : Any = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print("""\n""".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 414 | 1 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ConsistencyModelPipeline
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
snake_case__ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCAmelCase__ (self: Any ) -> Any:
'''simple docstring'''
__a : Optional[Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def UpperCAmelCase__ (self: int ) -> Union[str, Any]:
'''simple docstring'''
__a : int = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def UpperCAmelCase__ (self: Tuple , __UpperCAmelCase: Union[str, Any]=False ) -> int:
'''simple docstring'''
if class_cond:
__a : List[Any] = self.dummy_cond_unet
else:
__a : Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
__a : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__a : int = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCAmelCase__ (self: int , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Tuple=0 ) -> Optional[Any]:
'''simple docstring'''
if str(__UpperCAmelCase ).startswith("mps" ):
__a : Tuple = torch.manual_seed(__UpperCAmelCase )
else:
__a : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__a : Optional[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def UpperCAmelCase__ (self: Optional[int] ) -> Optional[int]:
'''simple docstring'''
__a : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__a : int = self.get_dummy_components()
__a : List[str] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Optional[int] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Any = self.get_dummy_inputs(__UpperCAmelCase )
__a : Union[str, Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__a : Any = image[0, -3:, -3:, -1]
__a : Optional[Any] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self: List[Any] ) -> Optional[int]:
'''simple docstring'''
__a : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.get_dummy_components(class_cond=__UpperCAmelCase )
__a : Optional[Any] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : str = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
__a : Tuple = 0
__a : List[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__a : str = image[0, -3:, -3:, -1]
__a : Union[str, Any] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__a : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__a : Optional[int] = self.get_dummy_components()
__a : List[Any] = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Dict = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase )
__a : Union[str, Any] = 1
__a : Optional[int] = None
__a : Optional[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__a : List[str] = image[0, -3:, -3:, -1]
__a : int = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self: Union[str, Any] ) -> Any:
'''simple docstring'''
__a : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
__a : Optional[Any] = self.get_dummy_components(class_cond=__UpperCAmelCase )
__a : Dict = ConsistencyModelPipeline(**__UpperCAmelCase )
__a : Optional[int] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
__a : List[str] = 1
__a : int = None
__a : Tuple = 0
__a : List[str] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 32, 32, 3)
__a : List[str] = image[0, -3:, -3:, -1]
__a : Any = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ (self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self: Optional[Any] , __UpperCAmelCase: int=0 , __UpperCAmelCase: Union[str, Any]=False , __UpperCAmelCase: int="cpu" , __UpperCAmelCase: List[str]=torch.floataa , __UpperCAmelCase: List[Any]=(1, 3, 64, 64) ) -> Optional[int]:
'''simple docstring'''
__a : Tuple = torch.manual_seed(__UpperCAmelCase )
__a : List[str] = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__a : Optional[int] = self.get_fixed_latents(seed=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase , shape=__UpperCAmelCase )
__a : List[str] = latents
return inputs
def UpperCAmelCase__ (self: List[str] , __UpperCAmelCase: int=0 , __UpperCAmelCase: int="cpu" , __UpperCAmelCase: Any=torch.floataa , __UpperCAmelCase: List[Any]=(1, 3, 64, 64) ) -> str:
'''simple docstring'''
if type(__UpperCAmelCase ) == str:
__a : Optional[Any] = torch.device(__UpperCAmelCase )
__a : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__a : Optional[Any] = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
return latents
def UpperCAmelCase__ (self: Optional[Any] ) -> str:
'''simple docstring'''
__a : Optional[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__a : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__a : List[Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Tuple = self.get_inputs()
__a : Union[str, Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__a : List[Any] = image[0, -3:, -3:, -1]
__a : Optional[int] = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCAmelCase__ (self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__a : str = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__a : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__a : int = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : List[Any] = self.get_inputs()
__a : Union[str, Any] = 1
__a : Dict = None
__a : int = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__a : int = image[0, -3:, -3:, -1]
__a : int = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def UpperCAmelCase__ (self: str ) -> Optional[Any]:
'''simple docstring'''
__a : List[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__a : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__a : List[str] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Optional[Any] = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
__a : List[str] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__a : List[Any] = image[0, -3:, -3:, -1]
__a : Union[str, Any] = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def UpperCAmelCase__ (self: List[Any] ) -> str:
'''simple docstring'''
__a : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__a : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
__a : Union[str, Any] = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__a : Tuple = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase )
__a : str = 1
__a : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ):
__a : Optional[Any] = pipe(**__UpperCAmelCase ).images
assert image.shape == (1, 64, 64, 3)
__a : Union[str, Any] = image[0, -3:, -3:, -1]
__a : Optional[int] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 351 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__ (__UpperCAmelCase: ArgumentParser ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ (self: List[str] ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
| 351 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
class lowercase :
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [[] for _ in range(A_ )]
SCREAMING_SNAKE_CASE : int = size
def __getitem__( self : Optional[int] , snake_case : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return self._size
def lowerCamelCase_ ( self : Any , snake_case : int , snake_case : int , snake_case : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(A_ , A_ ) )
def lowerCamelCase_ ( self : Union[str, Any] , snake_case : int , snake_case : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = deque([start_vertex] )
SCREAMING_SNAKE_CASE : Optional[int] = [None] * self.size
SCREAMING_SNAKE_CASE : Optional[int] = 0
while queue:
SCREAMING_SNAKE_CASE : List[str] = queue.popleft()
SCREAMING_SNAKE_CASE : Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE : Optional[Any] = current_distance + edge.weight
SCREAMING_SNAKE_CASE : Tuple = distances[edge.destination_vertex]
if (
isinstance(A_ , A_ )
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE : Any = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase : Dict = logging.get_logger(__name__)
@dataclass
class lowercase :
'''simple docstring'''
UpperCAmelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
UpperCAmelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
UpperCAmelCase : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.task_name.lower()
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : List[Any] = 'train'
UpperCAmelCase : Optional[Any] = 'dev'
UpperCAmelCase : Optional[int] = 'test'
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : GlueDataTrainingArguments
UpperCAmelCase : str
UpperCAmelCase : List[InputFeatures]
def __init__( self : Union[str, Any] , snake_case : GlueDataTrainingArguments , snake_case : PreTrainedTokenizerBase , snake_case : Optional[int] = None , snake_case : Union[str, Split] = Split.train , snake_case : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , snake_case , )
SCREAMING_SNAKE_CASE : Tuple = args
SCREAMING_SNAKE_CASE : int = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE : str = glue_output_modes[args.task_name]
if isinstance(snake_case , snake_case ):
try:
SCREAMING_SNAKE_CASE : Any = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
SCREAMING_SNAKE_CASE : Any = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE : Union[str, Any] = cached_features_file + '.lock'
with FileLock(snake_case ):
if os.path.exists(snake_case ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE : Optional[int] = time.time()
SCREAMING_SNAKE_CASE : int = torch.load(snake_case )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
SCREAMING_SNAKE_CASE : str = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
SCREAMING_SNAKE_CASE : Dict = self.processor.get_test_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE : str = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = examples[:limit_length]
SCREAMING_SNAKE_CASE : Optional[Any] = glue_convert_examples_to_features(
snake_case , snake_case , max_length=args.max_seq_length , label_list=snake_case , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE : Tuple = time.time()
torch.save(self.features , snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : int ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Dict , snake_case : Optional[int] ):
'''simple docstring'''
return self.features[i]
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.label_list | 308 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ ={
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ =[
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ =[
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 616 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCamelCase__ ( _a ):
a : Tuple = """gptj"""
a : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , A_ : Optional[int]=5_0_4_0_0 , A_ : Optional[Any]=2_0_4_8 , A_ : Optional[Any]=4_0_9_6 , A_ : Any=2_8 , A_ : Union[str, Any]=1_6 , A_ : int=6_4 , A_ : int=None , A_ : str="gelu_new" , A_ : str=0.0 , A_ : Optional[Any]=0.0 , A_ : Dict=0.0 , A_ : Dict=1e-5 , A_ : Optional[int]=0.02 , A_ : List[str]=True , A_ : List[Any]=5_0_2_5_6 , A_ : Optional[int]=5_0_2_5_6 , A_ : List[Any]=False , **A_ : int , ):
'''simple docstring'''
__lowercase = vocab_size
__lowercase = n_positions
__lowercase = n_embd
__lowercase = n_layer
__lowercase = n_head
__lowercase = n_inner
__lowercase = rotary_dim
__lowercase = activation_function
__lowercase = resid_pdrop
__lowercase = embd_pdrop
__lowercase = attn_pdrop
__lowercase = layer_norm_epsilon
__lowercase = initializer_range
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ )
class lowerCamelCase__ ( _a ):
def __init__( self : int , A_ : PretrainedConfig , A_ : str = "default" , A_ : List[PatchingSpec] = None , A_ : bool = False , ):
'''simple docstring'''
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , """pad_token_id""" , A_ ):
# TODO: how to do that better?
__lowercase = 0
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(A_ , direction="""inputs""" )
__lowercase = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowercase = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return self._config.n_head
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : PreTrainedTokenizer , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase = super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
# We need to order the input in the way they appears in the forward()
__lowercase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowercase , __lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
__lowercase = common_inputs["""attention_mask"""]
if self.use_past:
__lowercase = ordered_inputs["""attention_mask"""].dtype
__lowercase = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return 1_3
| 616 | 1 |
'''simple docstring'''
def __snake_case ( lowercase : float ):
return 10 - x * x
def __snake_case ( lowercase : float , lowercase : float ):
# Bolzano theory in order to find if there is a root between a and b
if equation(lowercase ) * equation(lowercase ) >= 0:
raise ValueError("Wrong space!" )
snake_case_ = a
while (b - a) >= 0.01:
# Find middle point
snake_case_ = (a + b) / 2
# Check if middle point is root
if equation(lowercase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowercase ) * equation(lowercase ) < 0:
snake_case_ = c
else:
snake_case_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 710 |
'''simple docstring'''
from __future__ import annotations
def __snake_case ( lowercase : list ):
if len(lowercase ) == 0:
return []
snake_case_ , snake_case_ = min(lowercase ), max(lowercase )
snake_case_ = int(max_value - min_value ) + 1
snake_case_ = [[] for _ in range(lowercase )]
for i in my_list:
buckets[int(i - min_value )].append(lowercase )
return [v for bucket in buckets for v in sorted(lowercase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 420 | 0 |
def __A(lowerCAmelCase ) -> list[int]:
"""simple docstring"""
_UpperCamelCase = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
for j in range(i + 1 , lowerCAmelCase ):
if numbers[j] < numbers[i]:
_UpperCamelCase , _UpperCamelCase = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 612 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCamelCase__ = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class lowerCAmelCase__ ( __lowercase ):
UpperCamelCase_ : Tuple = "facebook/nllb-200-distilled-600M"
UpperCamelCase_ : Optional[Any] = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
UpperCamelCase_ : Any = "translator"
UpperCamelCase_ : Union[str, Any] = AutoTokenizer
UpperCamelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM
UpperCamelCase_ : Any = LANGUAGE_CODES
UpperCamelCase_ : Optional[int] = ["text", "text", "text"]
UpperCamelCase_ : Optional[Any] = ["text"]
def A_ ( self , a , a , a ) -> str:
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
_UpperCamelCase = self.lang_to_code[src_lang]
_UpperCamelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
a , return_tensors="""pt""" , src_lang=a , tgt_lang=a )
def A_ ( self , a ) -> Any:
'''simple docstring'''
return self.model.generate(**a )
def A_ ( self , a ) -> Optional[Any]:
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=a )
| 612 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 'xmod'
def __init__( self : Dict ,lowercase__ : str=3_0_5_2_2 ,lowercase__ : List[Any]=7_6_8 ,lowercase__ : List[Any]=1_2 ,lowercase__ : Optional[Any]=1_2 ,lowercase__ : Tuple=3_0_7_2 ,lowercase__ : List[str]="gelu" ,lowercase__ : Dict=0.1 ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[Any]=5_1_2 ,lowercase__ : List[Any]=2 ,lowercase__ : int=0.0_2 ,lowercase__ : int=1e-1_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[str]=0 ,lowercase__ : Dict=2 ,lowercase__ : List[str]="absolute" ,lowercase__ : Dict=True ,lowercase__ : Any=None ,lowercase__ : Optional[Any]=False ,lowercase__ : List[str]=2 ,lowercase__ : List[str]=False ,lowercase__ : Union[str, Any]=True ,lowercase__ : Optional[Any]=True ,lowercase__ : Any=("en_XX",) ,lowercase__ : int=None ,**lowercase__ : str ,):
super().__init__(pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,**lowercase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
__lowercase = pre_norm
__lowercase = adapter_reduction_factor
__lowercase = adapter_layer_norm
__lowercase = adapter_reuse_layer_norm
__lowercase = ln_before_adapter
__lowercase = list(lowercase__ )
__lowercase = default_language
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 624 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = block_sizes
__lowercase = num_decoder_layers
__lowercase = d_model
__lowercase = n_head
__lowercase = d_head
__lowercase = d_inner
__lowercase = hidden_act
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = 2
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase = n_head
# Used in the tests to check the size of the first hidden state
__lowercase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
__lowercase = TFFunnelForPreTraining(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
__lowercase = TFFunnelForMaskedLM(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
__lowercase = self.num_choices
__lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForTokenClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
__lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Any = False
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = TFFunnelModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[str] = False
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = TFFunnelModelTester(self ,base=lowercase__ )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 624 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase :
def __init__( self : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=True , __lowerCamelCase : Any=9_9 , __lowerCamelCase : Tuple=3_2 , __lowerCamelCase : str=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : List[str]=3_7 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : int=5_1_2 , __lowerCamelCase : Dict=1_6 , __lowerCamelCase : str=2 , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Union[str, Any]=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCamelCase , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = FalconModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = True
_snake_case = FalconModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = FalconForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : str , ):
"""simple docstring"""
_snake_case = True
_snake_case = True
_snake_case = FalconForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
_snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
_snake_case = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['''hidden_states'''][0]
# select random slice
_snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : Optional[Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
A__ : Optional[Any] = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[Any] = False
A__ : List[Any] = False
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = FalconModelTester(self )
_snake_case = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case , *_snake_case = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_snake_case = alibi
self.model_tester.create_and_check_model(__lowerCamelCase , *__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = '''single_label_classification'''
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = input_dict['''input_ids''']
_snake_case = FalconForCausalLM(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , use_cache=__lowerCamelCase )
_snake_case = input_ids.shape[0]
_snake_case = model._convert_to_rw_cache(result.past_key_values )
_snake_case = model._convert_cache_to_standard_format(__lowerCamelCase , __lowerCamelCase )
for layer in range(len(__lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = '''multi_label_classification'''
_snake_case = input_dict['''input_ids''']
_snake_case = input_ids.ne(1 ).to(__lowerCamelCase )
_snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case = FalconForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowerCamelCase , '''use_cache''' ):
return
_snake_case = model_class(__lowerCamelCase ).to(__lowerCamelCase )
if "use_cache" not in inputs:
_snake_case = True
_snake_case = model(**__lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_snake_case = (
getattr(__lowerCamelCase , '''decoder_layers''' , __lowerCamelCase )
or getattr(__lowerCamelCase , '''num_decoder_layers''' , __lowerCamelCase )
or config.num_hidden_layers
)
_snake_case = getattr(__lowerCamelCase , '''num_kv_heads''' , config.num_attention_heads )
_snake_case = getattr(__lowerCamelCase , '''d_model''' , config.hidden_size )
_snake_case = embed_dim // num_attention_heads
_snake_case = outputs['''past_key_values''']
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_snake_case , _snake_case = inputs['''input_ids'''].shape
for i in range(__lowerCamelCase ):
if config.new_decoder_architecture:
_snake_case = config.num_attention_heads
elif config.multi_query:
_snake_case = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_snake_case = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
_snake_case = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=1_9 )
_snake_case = tokenizer.batch_decode(__lowerCamelCase )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_snake_case = AutoTokenizer.from_pretrained(__lowerCamelCase )
_snake_case = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=4 )
model.generate(**__lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_snake_case = AutoTokenizer.from_pretrained(__lowerCamelCase )
_snake_case = FalconForCausalLM.from_pretrained(__lowerCamelCase )
model.eval()
model.to(device=__lowerCamelCase )
_snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCamelCase )
# Test results are the same with and without cache
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=2_0 , use_cache=__lowerCamelCase )
_snake_case = model.generate(**__lowerCamelCase , do_sample=__lowerCamelCase , max_new_tokens=2_0 , use_cache=__lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 103 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list[float] , __UpperCamelCase : Any ):
'''simple docstring'''
print(F'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(__UpperCamelCase ):
print(F'{i}\t\t{d}' )
def __lowerCAmelCase ( __UpperCamelCase : list[dict[str, int]] , __UpperCamelCase : list[float] , __UpperCamelCase : int ):
'''simple docstring'''
for j in range(__UpperCamelCase ):
snake_case_ : Any = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def __lowerCAmelCase ( __UpperCamelCase : list[dict[str, int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = [float("""inf""" )] * vertex_count
snake_case_ : List[str] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__UpperCamelCase ):
snake_case_ : Tuple = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
snake_case_ : Optional[Any] = distance[u] + w
snake_case_ : Tuple = check_negative_cycle(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = int(input('''Enter number of vertices: ''').strip())
__lowerCAmelCase : List[Any] = int(input('''Enter number of edges: ''').strip())
__lowerCAmelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__lowerCAmelCase : Dict = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__lowerCAmelCase : List[str] = {'''src''': src, '''dst''': dest, '''weight''': weight}
__lowerCAmelCase : List[Any] = int(input('''\nEnter shortest path source:''').strip())
__lowerCAmelCase : Union[str, Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowercase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowercase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowercase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowercase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''train'''
_lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : SquadDataTrainingArguments
_lowercase : List[SquadFeatures]
_lowercase : Split
_lowercase : bool
def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
"""simple docstring"""
_lowerCAmelCase = args
_lowerCAmelCase = is_language_sensitive
_lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(_lowercase , _lowercase ):
try:
_lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase = mode
# Load data features from cache or dataset file
_lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
_lowerCAmelCase = time.time()
_lowerCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase = self.old_features["""features"""]
_lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
_lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
_lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.features[i]
_lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 5 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 315 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a =importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a =spec.loader.load_module()
a =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a =re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a ={
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
__lowerCamelCase : int = []
for config_class in list(CONFIG_MAPPING.values() ):
__lowerCamelCase : int = False
# source code of `config_class`
__lowerCamelCase : List[str] = inspect.getsource(lowerCamelCase__ )
__lowerCamelCase : str = _re_checkpoint.findall(lowerCamelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
__lowerCamelCase : List[Any] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase : List[str] = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase : List[str] = True
break
__lowerCamelCase : Optional[Any] = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__lowerCamelCase : Tuple = '\n'.join(sorted(lowerCamelCase__ ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a ={
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowerCAmelCase : str = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
lowerCAmelCase : Tuple = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return FSMTTokenizer.from_pretrained(_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = FSMTForConditionalGeneration.from_pretrained(_a ).to(_a )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowerCamelCase = f'facebook/wmt19-{pair}'
lowerCamelCase = self.get_tokenizer(_a )
lowerCamelCase = self.get_model(_a )
lowerCamelCase = bleu_data[pair]["""src"""]
lowerCamelCase = bleu_data[pair]["""tgt"""]
lowerCamelCase = tokenizer(_a , return_tensors="""pt""" , truncation=_a , padding="""longest""" ).to(_a )
lowerCamelCase = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCamelCase = tokenizer.batch_decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
lowerCamelCase = calculate_bleu(_a , _a )
print(_a )
self.assertGreaterEqual(scores["""bleu"""] , _a )
| 543 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a__ ( snake_case__ ) -> Dict[str, torch.Tensor]:
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = []
for rt in rc.restypes:
lowerCamelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowerCamelCase = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
lowerCamelCase = torch.tensor(
snake_case__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
lowerCamelCase = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowerCamelCase = restype_atomaa_to_atomaa[protein_aatype]
lowerCamelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowerCamelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowerCamelCase = rc.restype_atoa[restype_letter]
lowerCamelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowerCamelCase = rc.atom_order[atom_name]
lowerCamelCase = 1
lowerCamelCase = restype_atomaa_mask[protein_aatype]
lowerCamelCase = residx_atomaa_mask
return protein
def a__ ( snake_case__ ) -> Dict[str, np.ndarray]:
lowerCamelCase = tree_map(lambda snake_case__ : torch.tensor(snake_case__ , device=batch["""aatype"""].device ) , snake_case__ , np.ndarray )
lowerCamelCase = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out
| 543 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Dict = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self ) -> List[str]:
# test for the above condition
self.test()
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = 0
__lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
__lowerCAmelCase = self.advance()
if not self.does_advance(UpperCamelCase ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.update(UpperCamelCase )
counter += 1
if counter > 1_0000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> str:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> Dict:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
__lowerCAmelCase = token_ids
__lowerCAmelCase = len(self.token_ids )
__lowerCAmelCase = -1 # the index of the currently fulfilled step
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.fulfilled_idx += 1
__lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
__lowerCAmelCase = True
__lowerCAmelCase = completed
else:
# failed to make progress.
__lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = False
__lowerCAmelCase = 0
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Optional[Any]:
__lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.fulfilled_idx
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=True ) -> Optional[int]:
__lowerCAmelCase = max([len(UpperCamelCase ) for one in nested_token_ids] )
__lowerCAmelCase = {}
for token_ids in nested_token_ids:
__lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCamelCase ):
if token_id not in level:
__lowerCAmelCase = {}
__lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase , UpperCamelCase ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F''' {nested_token_ids}.''' )
__lowerCAmelCase = root
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
__lowerCAmelCase = self.trie
for current_token in current_seq:
__lowerCAmelCase = start[current_token]
__lowerCAmelCase = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> str:
__lowerCAmelCase = self.next_tokens(UpperCamelCase )
return len(UpperCamelCase ) == 0
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Optional[int]:
__lowerCAmelCase = list(root.values() )
if len(UpperCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = self.count_leaves(UpperCamelCase )
return len(UpperCamelCase ) != leaf_count
class UpperCAmelCase__ ( UpperCamelCase__ ):
def __init__( self , UpperCamelCase ) -> List[Any]:
super(UpperCamelCase , self ).__init__()
if not isinstance(UpperCamelCase , UpperCamelCase ) or len(UpperCamelCase ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCamelCase , UpperCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCamelCase , UpperCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
__lowerCAmelCase = DisjunctiveTrie(UpperCamelCase )
__lowerCAmelCase = nested_token_ids
__lowerCAmelCase = self.trie.max_height
__lowerCAmelCase = []
__lowerCAmelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[str]:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}''' )
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
if self.does_advance(UpperCamelCase ):
self.current_seq.append(UpperCamelCase )
__lowerCAmelCase = True
else:
__lowerCAmelCase = True
self.reset()
__lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
__lowerCAmelCase = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self ) -> Dict:
__lowerCAmelCase = False
__lowerCAmelCase = []
def UpperCAmelCase_ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self , UpperCamelCase=False ) -> Union[str, Any]:
__lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
__lowerCAmelCase = self.seqlen
__lowerCAmelCase = self.current_seq
__lowerCAmelCase = self.completed
return new_constraint
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
__lowerCAmelCase = max([c.seqlen for c in constraints] )
__lowerCAmelCase = len(UpperCamelCase )
__lowerCAmelCase = False
self.init_state()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = []
__lowerCAmelCase = None
__lowerCAmelCase = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints]
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self ) -> List[str]:
__lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__lowerCAmelCase = constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
else:
__lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase , UpperCamelCase ):
token_list.append(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
token_list.extend(UpperCamelCase )
if len(UpperCamelCase ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self , UpperCamelCase ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__lowerCAmelCase , __lowerCAmelCase = self.add(UpperCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self , UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
__lowerCAmelCase , __lowerCAmelCase = False, False
if self.completed:
__lowerCAmelCase = True
__lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.inprogress_constraint.update(UpperCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) )
__lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
__lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = pending_constraint.update(UpperCamelCase )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase )
__lowerCAmelCase = None
if not complete and stepped:
__lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self , UpperCamelCase=True ) -> str:
__lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__lowerCAmelCase = [
constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCamelCase )
__lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state | 39 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self : Optional[int] ):
A__ : List[str] =[]
A__ : Any =[]
for i in range(self.num_layers ):
A__ : List[str] =self.in_channels if i == 0 else self.out_channels
A__ : str =FlaxResnetBlockaD(
in_channels=UpperCamelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase__ )
A__ : Optional[int] =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase__ )
A__ : Optional[int] =resnets
A__ : List[Any] =attentions
if self.add_downsample:
A__ : Any =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=True ):
A__ : str =()
for resnet, attn in zip(self.resnets , self.attentions ):
A__ : Dict =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
A__ : Tuple =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
A__ : Tuple =self.downsamplers_a(UpperCamelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self : List[str] ):
A__ : List[str] =[]
for i in range(self.num_layers ):
A__ : List[str] =self.in_channels if i == 0 else self.out_channels
A__ : List[Any] =FlaxResnetBlockaD(
in_channels=UpperCamelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase__ )
A__ : Optional[Any] =resnets
if self.add_downsample:
A__ : Optional[int] =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=True ):
A__ : Optional[int] =()
for resnet in self.resnets:
A__ : List[Any] =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
A__ : str =self.downsamplers_a(UpperCamelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self : Optional[int] ):
A__ : List[Any] =[]
A__ : Optional[int] =[]
for i in range(self.num_layers ):
A__ : Any =self.in_channels if (i == self.num_layers - 1) else self.out_channels
A__ : Any =self.prev_output_channel if i == 0 else self.out_channels
A__ : str =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase__ )
A__ : Dict =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase__ )
A__ : Dict =resnets
A__ : Dict =attentions
if self.add_upsample:
A__ : List[str] =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
A__ : Optional[int] =res_hidden_states_tuple[-1]
A__ : Dict =res_hidden_states_tuple[:-1]
A__ : Optional[int] =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A__ : List[str] =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
A__ : str =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
if self.add_upsample:
A__ : List[str] =self.upsamplers_a(UpperCamelCase__ )
return hidden_states
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self : Optional[int] ):
A__ : str =[]
for i in range(self.num_layers ):
A__ : Any =self.in_channels if (i == self.num_layers - 1) else self.out_channels
A__ : int =self.prev_output_channel if i == 0 else self.out_channels
A__ : str =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase__ )
A__ : Union[str, Any] =resnets
if self.add_upsample:
A__ : List[Any] =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=True ):
for resnet in self.resnets:
# pop res hidden states
A__ : Union[str, Any] =res_hidden_states_tuple[-1]
A__ : Dict =res_hidden_states_tuple[:-1]
A__ : Optional[int] =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A__ : Tuple =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
if self.add_upsample:
A__ : Optional[int] =self.upsamplers_a(UpperCamelCase__ )
return hidden_states
class __lowerCAmelCase ( nn.Module):
'''simple docstring'''
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self : Union[str, Any] ):
# there is always at least one resnet
A__ : int =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
A__ : str =[]
for _ in range(self.num_layers ):
A__ : Tuple =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase__ )
A__ : Tuple =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase__ )
A__ : Tuple =resnets
A__ : Tuple =attentions
def __call__( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=True ):
A__ : Optional[int] =self.resnets[0](UpperCamelCase__ , UpperCamelCase__ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
A__ : Dict =attn(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
A__ : Dict =resnet(UpperCamelCase__ , UpperCamelCase__ , deterministic=UpperCamelCase__ )
return hidden_states
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A : Any = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _A ( __a , unittest.TestCase ):
__a = RoCBertTokenizer
__a = None
__a = False
__a = True
__a = filter_non_english
def _lowerCamelCase ( self ) -> str:
super().setUp()
lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowerCamelCase__ = {}
lowerCamelCase__ = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = i
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ ) , [5, 6, 2, 5, 7, 8] )
def _lowerCamelCase ( self ) -> List[Any]:
lowerCamelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self ) -> str:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase__ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = i
lowerCamelCase__ = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowerCamelCase ( self ) -> Any:
lowerCamelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
lowerCamelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def _lowerCamelCase ( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCamelCase__ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE__ , "do_lower_case" ) else False
lowerCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = ["的", "人", "有"]
lowerCamelCase__ = "".join(SCREAMING_SNAKE_CASE__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = True
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase__ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE__ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def _lowerCamelCase ( self ) -> int:
lowerCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase__ = tokenizer.encode("你好" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode("你是谁" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = "你好,你是谁"
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 274 |
"""simple docstring"""
def UpperCAmelCase__ ( ) -> int:
"""simple docstring"""
return 1
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(A__ )
def UpperCAmelCase__ ( A__ ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(A__ )
def UpperCAmelCase__ ( A__ = 200 ) -> int:
"""simple docstring"""
return two_pound(A__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 274 | 1 |
'''simple docstring'''
import pytest
lowerCAmelCase_ : Optional[Any] = """__dummy_dataset1__"""
lowerCAmelCase_ : List[str] = """\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"""
@pytest.fixture
def __A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __A ( ) -> int:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Any:
'''simple docstring'''
_UpperCamelCase : List[Any] = dataset_loading_script_name
_UpperCamelCase : str = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=lowerCAmelCase__ )
_UpperCamelCase : Dict = script_dir / f'''{script_name}.py'''
with open(lowerCAmelCase__ ,"w" ) as f:
f.write(lowerCAmelCase__ )
return str(lowerCAmelCase__ )
| 435 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'dpr'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a = 0 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = projection_dim
__a = position_embedding_type
| 695 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 711 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 87 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def _a ( _lowerCamelCase = 8 , _lowerCamelCase = None ) -> str:
"""simple docstring"""
__snake_case : Any = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__snake_case : Optional[int] = 6 * key_len
# Measurement basis for Alice's qubits.
__snake_case : str = rng.integers(2 , size=_lowerCamelCase )
# The set of states Alice will prepare.
__snake_case : Any = rng.integers(2 , size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
__snake_case : Any = rng.integers(2 , size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
__snake_case : Dict = qiskit.QuantumCircuit(_lowerCamelCase , name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__snake_case : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__snake_case : Any = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1 , seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
__snake_case : str = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__snake_case : Any = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__snake_case : Any = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase , """0""" )
return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 26 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
lowerCAmelCase__ = Path(__file__).parent / """model_card_template.md"""
lowerCAmelCase__ = uuida().hex
lowerCAmelCase__ = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[Dict, str, None] = None ) -> str:
'''simple docstring'''
A__ = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'; torch/{_torch_version}'
if is_flax_available():
ua += F'; jax/{_jax_version}'
ua += F'; flax/{_flax_version}'
if is_onnx_available():
ua += F'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
ua += "; " + user_agent
return ua
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[str] = None , SCREAMING_SNAKE_CASE_: Optional[str] = None ) -> Optional[int]:
'''simple docstring'''
if token is None:
A__ = HfFolder.get_token()
if organization is None:
A__ = whoami(SCREAMING_SNAKE_CASE_ )["name"]
return F'{username}/{model_id}'
else:
return F'{organization}/{model_id}'
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(SCREAMING_SNAKE_CASE_ , "local_rank" ) and args.local_rank not in [-1, 0]:
return
A__ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE_ , "hub_token" ) else None
A__ = get_full_repo_name(SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ )
A__ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , repo_name=SCREAMING_SNAKE_CASE_ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE_ , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE_ , "gradient_accumulation_steps" ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE_ , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE_ , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE_ , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE_ , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE_ , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE_ , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE_ , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
A__ = os.path.join(args.output_dir , "README.md" )
model_card.save(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[str] , SCREAMING_SNAKE_CASE_: Optional[str] = None ) -> List[str]:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).as_posix() )
A__ = re.search(R"snapshots/([^/]+)/" , SCREAMING_SNAKE_CASE_ )
if search is None:
return None
A__ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE_ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase__ = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
lowerCAmelCase__ = os.path.join(hf_cache_home, """diffusers""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[str] = None , SCREAMING_SNAKE_CASE_: Optional[str] = None ) -> None:
'''simple docstring'''
if new_cache_dir is None:
A__ = DIFFUSERS_CACHE
if old_cache_dir is None:
A__ = old_diffusers_cache
A__ = Path(SCREAMING_SNAKE_CASE_ ).expanduser()
A__ = Path(SCREAMING_SNAKE_CASE_ ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
A__ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE_ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
os.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
try:
os.symlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase__ = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowerCAmelCase__ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase__ = int(f.read())
except ValueError:
lowerCAmelCase__ = 0
if cache_version < 1:
lowerCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowerCAmelCase__ = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[str] = None ) -> str:
'''simple docstring'''
if variant is not None:
A__ = weights_name.split("." )
A__ = splits[:-1] + [variant] + splits[-1:]
A__ = ".".join(SCREAMING_SNAKE_CASE_ )
return weights_name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , *,
SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: Optional[int]=None , ) -> Optional[int]:
'''simple docstring'''
A__ = str(SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE_ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
# Load from a PyTorch checkpoint
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model_file
else:
raise EnvironmentError(
F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse("0.20.0" )
):
try:
A__ = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
warnings.warn(
F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , SCREAMING_SNAKE_CASE_ , )
return model_file
except: # noqa: E722
warnings.warn(
F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}\' so that the correct variant file can be added.' , SCREAMING_SNAKE_CASE_ , )
try:
# 2. Load model file as usual
A__ = hf_hub_download(
SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
F' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
F'containing a file named {weights_name}' )
| 514 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = size if size is not None else {"height": 18, "width": 18}
_SCREAMING_SNAKE_CASE : Optional[int] = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : List[str] = num_channels
_SCREAMING_SNAKE_CASE : List[str] = image_size
_SCREAMING_SNAKE_CASE : Any = min_resolution
_SCREAMING_SNAKE_CASE : Tuple = max_resolution
_SCREAMING_SNAKE_CASE : List[Any] = do_resize
_SCREAMING_SNAKE_CASE : Optional[int] = size
_SCREAMING_SNAKE_CASE : List[str] = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "apply_ocr" ) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , snake_case__ )
self.assertIsInstance(encoding.boxes , snake_case__ )
# Test batched
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Optional[int] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = LayoutLMvaImageProcessor()
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
_SCREAMING_SNAKE_CASE : Dict = Image.open(ds[0]["file"] ).convert("RGB" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(snake_case__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_SCREAMING_SNAKE_CASE : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_SCREAMING_SNAKE_CASE : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case__ )
self.assertListEqual(encoding.boxes , snake_case__ )
# with apply_OCR = False
_SCREAMING_SNAKE_CASE : int = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
_SCREAMING_SNAKE_CASE : int = image_processing(snake_case__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 295 |
"""simple docstring"""
from __future__ import annotations
lowercase_ : List[str] = '''#'''
class UpperCamelCase :
def __init__( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : dict = {}
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self._trie
for char in text:
if char not in trie:
_SCREAMING_SNAKE_CASE : List[str] = {}
_SCREAMING_SNAKE_CASE : List[str] = trie[char]
_SCREAMING_SNAKE_CASE : Optional[Any] = True
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self._trie
for char in prefix:
if char in trie:
_SCREAMING_SNAKE_CASE : str = trie[char]
else:
return []
return self._elements(snake_case__ )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = []
for c, v in d.items():
_SCREAMING_SNAKE_CASE : int = [" "] if c == END else [(c + s) for s in self._elements(snake_case__ )]
result.extend(snake_case__ )
return tuple(snake_case__ )
lowercase_ : Union[str, Any] = Trie()
lowercase_ : Optional[Any] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> tuple:
_SCREAMING_SNAKE_CASE : Dict = trie.find_word(lowerCamelCase__ )
return tuple(string + word for word in suffixes )
def _lowerCAmelCase ( ) -> None:
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 295 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 209 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 279 | 0 |
from math import factorial
def UpperCamelCase ( _a = 1_0_0 ) -> int:
'''simple docstring'''
return sum(map(_a , str(factorial(_a ) ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 714 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCamelCase_ , ):
lowercase_ :Dict = parent
lowercase_ :Optional[Any] = 13
lowercase_ :Optional[Any] = 7
lowercase_ :List[Any] = 30
lowercase_ :int = self.seq_length + self.mem_len
lowercase_ :Any = 15
lowercase_ :Optional[Any] = True
lowercase_ :List[Any] = True
lowercase_ :Any = 99
lowercase_ :Optional[int] = [10, 50, 80]
lowercase_ :Union[str, Any] = 32
lowercase_ :List[Any] = 32
lowercase_ :Tuple = 4
lowercase_ :Tuple = 8
lowercase_ :List[Any] = 128
lowercase_ :Any = 2
lowercase_ :Tuple = 2
lowercase_ :Dict = None
lowercase_ :Optional[Any] = 1
lowercase_ :Optional[int] = 0
lowercase_ :List[str] = 3
lowercase_ :Optional[int] = self.vocab_size - 1
lowercase_ :List[Any] = 0.01
def UpperCamelCase ( self ):
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :int = None
if self.use_labels:
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCamelCase ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Any = TFTransfoXLModel(UpperCamelCase_ )
lowercase_ , lowercase_ :List[Any] = model(UpperCamelCase_ ).to_tuple()
lowercase_ :Dict = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase_ , lowercase_ :int = model(UpperCamelCase_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Any = TFTransfoXLLMHeadModel(UpperCamelCase_ )
lowercase_ , lowercase_ :int = model(UpperCamelCase_ ).to_tuple()
lowercase_ :Optional[int] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase_ , lowercase_ :Optional[int] = model(UpperCamelCase_ ).to_tuple()
lowercase_ , lowercase_ :Tuple = model([input_ids_a, mems_a] ).to_tuple()
lowercase_ :Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase_ , lowercase_ :Union[str, Any] = model(UpperCamelCase_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :int = TFTransfoXLForSequenceClassification(UpperCamelCase_ )
lowercase_ :int = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
lowercase_ :str = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) :Tuple = config_and_inputs
lowercase_ :Dict = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : str =(
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase : Dict =() if is_tf_available() else ()
lowercase : List[str] =(
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase : Optional[int] =False
lowercase : Tuple =False
lowercase : Dict =False
lowercase : Dict =False
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = TFTransfoXLModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=UpperCamelCase_ , d_embed=37 )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
self.model_tester.set_seed()
lowercase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase_ )
def UpperCamelCase ( self ):
self.model_tester.set_seed()
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ , lowercase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ :List[str] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase_ :Dict = model_class(UpperCamelCase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase_ :str = model.get_output_embeddings()
assert isinstance(UpperCamelCase_ , tf.keras.layers.Layer )
lowercase_ :Optional[int] = model.get_bias()
assert name is None
else:
lowercase_ :List[Any] = model.get_output_embeddings()
assert x is None
lowercase_ :Dict = model.get_bias()
assert name is None
def UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Dict = TFTransfoXLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCamelCase ( self ):
pass
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCamelCase ( self ):
lowercase_ :Any = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase_ :List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase_ :List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase_ :Any = model.generate(UpperCamelCase_ , max_length=200 , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
| 441 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ = """<<<<<<< This should probably be modified because it mentions: """
a_ = """=======
>>>>>>>
"""
a_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
a_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def __lowerCAmelCase ( A_ : Namespace ) -> Optional[int]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
@staticmethod
def _UpperCAmelCase ( __lowerCAmelCase: ArgumentParser ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self: Optional[int] , __lowerCAmelCase: str , __lowerCAmelCase: str , *__lowerCAmelCase: int ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = get_logger("datasets-cli/converting" )
__UpperCAmelCase = tfds_path
__UpperCAmelCase = datasets_directory
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
__UpperCAmelCase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__UpperCAmelCase = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
__UpperCAmelCase = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = {}
if os.path.isdir(self._tfds_path ):
__UpperCAmelCase = os.listdir(__lowerCAmelCase )
else:
__UpperCAmelCase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
__UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__lowerCAmelCase , encoding="utf-8" ) as f:
__UpperCAmelCase = f.readlines()
__UpperCAmelCase = []
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = []
for line in lines:
__UpperCAmelCase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__UpperCAmelCase = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
__UpperCAmelCase = ""
continue
elif "from absl import logging" in out_line:
__UpperCAmelCase = "from datasets import logging\n"
elif "getLogger" in out_line:
__UpperCAmelCase = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__UpperCAmelCase = True
__UpperCAmelCase = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + "\n" )
out_lines.append(__lowerCAmelCase )
out_lines.append(__lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__UpperCAmelCase = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__UpperCAmelCase = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , __lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
__UpperCAmelCase = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__UpperCAmelCase = True
out_lines.append(__lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__UpperCAmelCase = f_name.replace(".py" , "" )
__UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
__UpperCAmelCase = os.path.basename(__lowerCAmelCase )
__UpperCAmelCase = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 221 | from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : int
lowerCAmelCase__ : TreeNode | None = None
lowerCAmelCase__ : TreeNode | None = None
a_ = namedtuple("""CoinsDistribResult""", """moves excess""")
def __lowerCAmelCase ( A_ : TreeNode | None ) -> int:
if root is None:
return 0
# Validation
def count_nodes(A_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(A_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(A_ ) != count_coins(A_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(A_ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__UpperCAmelCase , __UpperCAmelCase = get_distrib(node.left )
__UpperCAmelCase , __UpperCAmelCase = get_distrib(node.right )
__UpperCAmelCase = 1 - left_distrib_excess
__UpperCAmelCase = 1 - right_distrib_excess
__UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(A_ )
+ abs(A_ )
)
__UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(A_ , A_ )
return get_distrib(A_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowercase__( __UpperCamelCase: Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
SCREAMING_SNAKE_CASE : List[Any] = FileLock(str(tmpdir / 'foo.lock' ) )
SCREAMING_SNAKE_CASE : List[str] = 0.0_1
with locka.acquire():
with pytest.raises(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = time.time()
locka.acquire(__UpperCamelCase )
assert time.time() - _start > timeout
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 'a' * 10_00 + '.lock'
SCREAMING_SNAKE_CASE : str = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(__UpperCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
SCREAMING_SNAKE_CASE : Optional[Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__UpperCamelCase ):
locka.acquire(0 )
| 714 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: bool ,__UpperCamelCase: list[int] ,__UpperCamelCase: float ):
"""simple docstring"""
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(__UpperCamelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 ,node_index * 2 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,)
return min(
minimax(depth + 1 ,node_index * 2 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,)
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
SCREAMING_SNAKE_CASE : List[Any] = math.log(len(__UpperCamelCase ) ,2 )
print('Optimal value : ' ,end='' )
print(minimax(0 ,0 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 508 | 0 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase_ ( lowerCAmelCase__ : Callable , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> np.array:
'''simple docstring'''
A = int(np.ceil((x_end - xa) / step_size ) )
A = np.zeros((n + 1,) )
A = ya
A = xa
for k in range(lowerCAmelCase__ ):
A = y[k] + step_size * ode_func(lowerCAmelCase__ , y[k] )
A = y[k] + (
(step_size / 2) * (ode_func(lowerCAmelCase__ , y[k] ) + ode_func(x + step_size , lowerCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( _A , unittest.TestCase ):
"""simple docstring"""
A = FunnelTokenizer
A = FunnelTokenizerFast
A = True
A = True
def snake_case_ ( self ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ :str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case_ ( self , **_lowerCAmelCase ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case_ ( self , **_lowerCAmelCase ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = "UNwant\u00E9d,running"
lowerCAmelCase__ :Dict = "unwanted, running"
return input_text, output_text
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ :Any = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_lowerCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
lowerCAmelCase__ :List[str] = tokenizer("UNwant\u00E9d,running" )
lowerCAmelCase__ :Tuple = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowerCAmelCase__ :List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 145 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ = 100 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = n * (n + 1) * (2 * n + 1) / 6
__SCREAMING_SNAKE_CASE = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"{solution() = }")
| 715 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__SCREAMING_SNAKE_CASE = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Any:
__SCREAMING_SNAKE_CASE = vqa_pipeline(UpperCAmelCase__ , top_k=1 )
self.assertEqual(
UpperCAmelCase__ , [
[{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}],
[{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}],
] , )
@require_torch
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
__SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__SCREAMING_SNAKE_CASE = "How many cats are there?"
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ )}] )
@slow
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
__SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__SCREAMING_SNAKE_CASE = "How many cats are there?"
__SCREAMING_SNAKE_CASE = vqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
__SCREAMING_SNAKE_CASE = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
pass
| 553 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(__snake_case ) * abs(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 108 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case="shi-labs/oneformer_demo" ) -> Any:
with open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) as f:
_UpperCAmelCase = json.load(__snake_case )
_UpperCAmelCase = {}
_UpperCAmelCase = []
_UpperCAmelCase = []
for key, info in class_info.items():
_UpperCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__snake_case ) )
_UpperCAmelCase = thing_ids
_UpperCAmelCase = class_names
return metadata
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]=7 , lowerCamelCase : str=3 , lowerCamelCase : Union[str, Any]=30 , lowerCamelCase : Optional[int]=400 , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : int=True , lowerCamelCase : List[str]=True , lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase : Tuple=10 , lowerCamelCase : str=False , lowerCamelCase : Union[str, Any]=255 , lowerCamelCase : Tuple="shi-labs/oneformer_demo" , lowerCamelCase : Tuple="ade20k_panoptic.json" , lowerCamelCase : Optional[Any]=10 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = class_info_file
_UpperCAmelCase = prepare_metadata(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = num_text
_UpperCAmelCase = repo_path
# for the post_process_functions
_UpperCAmelCase = 2
_UpperCAmelCase = 10
_UpperCAmelCase = 10
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = num_labels
_UpperCAmelCase = do_reduce_labels
_UpperCAmelCase = ignore_index
def lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCamelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : Tuple=False ) -> Any:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size["""shortest_edge"""] * h / w )
_UpperCAmelCase = self.size["""shortest_edge"""]
elif w > h:
_UpperCAmelCase = self.size["""shortest_edge"""]
_UpperCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
_UpperCAmelCase = self.size["""shortest_edge"""]
_UpperCAmelCase = self.size["""shortest_edge"""]
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
_UpperCAmelCase = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowerCamelCase = image_processing_class
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = OneFormerImageProcessorTester(self )
@property
def lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """ignore_index""" ) )
self.assertTrue(hasattr(lowerCamelCase , """class_info_file""" ) )
self.assertTrue(hasattr(lowerCamelCase , """num_text""" ) )
self.assertTrue(hasattr(lowerCamelCase , """repo_path""" ) )
self.assertTrue(hasattr(lowerCamelCase , """metadata""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_reduce_labels""" ) )
def lowerCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
# Initialize image_processor
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processing_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase ( self : Tuple , lowerCamelCase : Tuple=False , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : Union[str, Any]="np" ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase = self.image_processing_tester.num_labels
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase )
if with_segmentation_maps:
_UpperCAmelCase = num_labels
if is_instance_map:
_UpperCAmelCase = list(range(lowerCamelCase ) ) * 2
_UpperCAmelCase = dict(enumerate(lowerCamelCase ) )
_UpperCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase = [Image.fromarray(lowerCamelCase ) for annotation in annotations]
_UpperCAmelCase = image_processor(
lowerCamelCase , ["""semantic"""] * len(lowerCamelCase ) , lowerCamelCase , return_tensors="""pt""" , instance_id_to_semantic_id=lowerCamelCase , pad_and_return_pixel_mask=lowerCamelCase , )
return inputs
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
def lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
def common(lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=None ):
_UpperCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCamelCase , is_instance_map=lowerCamelCase , segmentation_type=lowerCamelCase )
_UpperCAmelCase = inputs["""mask_labels"""]
_UpperCAmelCase = inputs["""class_labels"""]
_UpperCAmelCase = inputs["""pixel_values"""]
_UpperCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCamelCase )
common(is_instance_map=lowerCamelCase , segmentation_type="""pil""" )
common(is_instance_map=lowerCamelCase , segmentation_type="""pil""" )
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = np.zeros((20, 50) )
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = binary_mask_to_rle(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = fature_extractor.post_process_semantic_segmentation(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_UpperCAmelCase = fature_extractor.post_process_semantic_segmentation(lowerCamelCase , target_sizes=lowerCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_instance_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_UpperCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase = image_processor.post_process_panoptic_segmentation(lowerCamelCase , threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) | 108 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""new-model"""
if is_tf_available():
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =NewModelConfig
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "bert-base-cased"
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = "bert-base-cased"
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> int:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
@require_tensorflow_probability
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsInstance(_a, _a )
self.assertEqual(model.num_parameters(), 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsInstance(_a, _a )
self.assertEqual(model.num_parameters(), 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )
def __lowerCAmelCase ( self ) -> List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
__SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
__SCREAMING_SNAKE_CASE = TFAutoModel.from_config(_a )
self.assertIsInstance(_a, _a )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a )
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
self.assertIsInstance(_a, _a )
def __lowerCAmelCase ( self ) -> str:
try:
AutoConfig.register("new-model", _a )
__SCREAMING_SNAKE_CASE = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_a ):
auto_class.register(_a, _a )
auto_class.register(_a, _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
auto_class.register(_a, _a )
# Now that the config is registered, it can be used as any other config with the auto-API
__SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
__SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
__SCREAMING_SNAKE_CASE = auto_class.from_config(_a )
self.assertIsInstance(_a, _a )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a )
__SCREAMING_SNAKE_CASE = auto_class.from_pretrained(_a )
self.assertIsInstance(_a, _a )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowerCAmelCase ( self ) -> List[str]:
with self.assertRaisesRegex(
_a, "bert-base is not a local folder and is not a valid model identifier" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
_a, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a, revision="aaaaaa" )
def __lowerCAmelCase ( self ) -> Dict:
with self.assertRaisesRegex(
_a, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __lowerCAmelCase ( self ) -> Optional[int]:
with self.assertRaisesRegex(_a, "Use `from_pt=True` to load this model" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __lowerCAmelCase ( self ) -> List[Any]:
# Make sure we have cached the model.
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
# With a sharded checkpoint
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
| 712 |
def _A ( __snake_case :int ) -> bool:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
raise ValueError("check_bouncy() accepts only integer arguments" )
__SCREAMING_SNAKE_CASE = str(__snake_case )
__SCREAMING_SNAKE_CASE = "".join(sorted(__snake_case ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _A ( __snake_case :float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
while True:
if check_bouncy(__snake_case ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 214 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
UpperCAmelCase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
UpperCAmelCase__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
UpperCAmelCase__ = BeautifulSoup(res.text, 'html.parser')
UpperCAmelCase__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 224 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('check_bouncy() accepts only integer arguments' )
_A = str(_SCREAMING_SNAKE_CASE )
_A = ''.join(sorted(_SCREAMING_SNAKE_CASE ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError('solution() only accepts values from 0 to 100' )
_A = 0
_A = 1
while True:
if check_bouncy(_SCREAMING_SNAKE_CASE ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 27 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__UpperCAmelCase : int =1_0_0_0_0
__UpperCAmelCase : Optional[List[str]] =None
__UpperCAmelCase : Optional[datasets.Features] =None
class _UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__UpperCAmelCase : List[str] =ParquetConfig
def snake_case ( self ):
return datasets.DatasetInfo(features=self.config.features )
def snake_case ( self , __a ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
__lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
__lowerCAmelCase = data_files
if isinstance(__a , __a ):
__lowerCAmelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowerCAmelCase = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
__lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
__lowerCAmelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowerCAmelCase = [dl_manager.iter_files(__a ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__a ):
with open(__a , "rb" ) as f:
__lowerCAmelCase = datasets.Features.from_arrow_schema(pq.read_schema(__a ) )
break
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"files": files} ) )
return splits
def snake_case ( self , __a ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowerCAmelCase = table_cast(__a , self.info.features.arrow_schema )
return pa_table
def snake_case ( self , __a ):
__lowerCAmelCase = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ):
with open(__a , "rb" ) as f:
__lowerCAmelCase = pq.ParquetFile(__a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__lowerCAmelCase = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(__a )
except ValueError as e:
logger.error(f"Failed to read file \'{file}\' with error {type(__a )}: {e}" )
raise
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Optional[int] = logging.get_logger(__name__)
A : List[str] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""bit"""
__UpperCAmelCase : Optional[int] =["""preactivation""", """bottleneck"""]
__UpperCAmelCase : List[str] =["""SAME""", """VALID"""]
def __init__( self , __a=3 , __a=64 , __a=[2_56, 5_12, 10_24, 20_48] , __a=[3, 4, 6, 3] , __a="preactivation" , __a="relu" , __a=None , __a=32 , __a=0.0 , __a=False , __a=32 , __a=1 , __a=None , __a=None , **__a , ):
super().__init__(**__a )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__lowerCAmelCase = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
__lowerCAmelCase = num_channels
__lowerCAmelCase = embedding_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = layer_type
__lowerCAmelCase = hidden_act
__lowerCAmelCase = global_padding
__lowerCAmelCase = num_groups
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = embedding_dynamic_padding
__lowerCAmelCase = output_stride
__lowerCAmelCase = width_factor
__lowerCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__a ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 282 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 395 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
snake_case__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase__ ( a : Any ) -> Optional[int]:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase__ ( a : Dict , a : str , a : Optional[Any] ) -> Tuple:
"""simple docstring"""
return max(metric_fn(a , a ) for gt in ground_truths )
def lowerCamelCase__ ( a : int , a : str , a : Optional[Any] ) -> Tuple:
"""simple docstring"""
a__ :List[str] = [line.strip() for line in open(a , "r" ).readlines()]
a__ :Tuple = []
if args.gold_data_mode == "qa":
a__ :Optional[Any] = pd.read_csv(a , sep="\t" , header=a )
for answer_list in data[1]:
a__ :Union[str, Any] = ast.literal_eval(a )
answers.append(a )
else:
a__ :int = [line.strip() for line in open(a , "r" ).readlines()]
a__ :int = [[reference] for reference in references]
a__ :Tuple = 0
for prediction, ground_truths in zip(a , a ):
total += 1
em += metric_max_over_ground_truths(a , a , a )
fa += metric_max_over_ground_truths(a , a , a )
a__ :Optional[int] = 1_0_0.0 * em / total
a__ :str = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def lowerCamelCase__ ( a : Optional[int] , a : Tuple , a : int ) -> Dict:
"""simple docstring"""
a__ :List[Any] = args.k
a__ :str = [line.strip() for line in open(a , "r" ).readlines()]
a__ :Any = [line.strip() for line in open(a , "r" ).readlines()]
a__ :Optional[int] = 0
for hypo, reference in zip(a , a ):
a__ :Optional[Any] = set(hypo.split("\t" )[:k] )
a__ :int = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a__ :List[Any] = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def lowerCamelCase__ ( a : Optional[int] , a : Tuple , a : Optional[Any] ) -> Dict:
"""simple docstring"""
def strip_title(a : Any ):
if title.startswith("\"" ):
a__ :Optional[Any] = title[1:]
if title.endswith("\"" ):
a__ :Any = title[:-1]
return title
a__ :int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a , return_tensors="pt" , padding=a , truncation=a , )["input_ids"].to(args.device )
a__ :Dict = rag_model.rag.question_encoder(a )
a__ :Optional[int] = question_enc_outputs[0]
a__ :Optional[Any] = rag_model.retriever(
a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
a__ :int = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a__ :Any = []
for docs in all_docs:
a__ :Tuple = [strip_title(a ) for title in docs["title"]]
provenance_strings.append("\t".join(a ) )
return provenance_strings
def lowerCamelCase__ ( a : Union[str, Any] , a : Union[str, Any] , a : str ) -> str:
"""simple docstring"""
with torch.no_grad():
a__ :str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a , return_tensors="pt" , padding=a , truncation=a )
a__ :Tuple = inputs_dict.input_ids.to(args.device )
a__ :int = inputs_dict.attention_mask.to(args.device )
a__ :int = rag_model.generate( # rag_model overwrites generate
a , attention_mask=a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a__ :Optional[int] = rag_model.retriever.generator_tokenizer.batch_decode(a , skip_special_tokens=a )
if args.print_predictions:
for q, a in zip(a , a ):
logger.info("Q: {} - A: {}".format(a , a ) )
return answers
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
a__ :List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=a , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=a , choices=["exact", "compressed", "legacy"] , type=a , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=a , type=a , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=a , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=a , type=a , required=a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=a , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=a , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=a , type=a , required=a , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=a , type=a , required=a , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=a , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=a , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=a , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=a , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=a , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
a__ :Tuple = parser.parse_args()
a__ :str = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def lowerCamelCase__ ( a : int ) -> List[Any]:
"""simple docstring"""
a__ :Optional[int] = {}
if args.model_type is None:
a__ :Dict = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
a__ :Union[str, Any] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
a__ :Union[str, Any] = args.n_docs
if args.index_name is not None:
a__ :Tuple = args.index_name
if args.index_path is not None:
a__ :Any = args.index_path
else:
a__ :Optional[Any] = BartForConditionalGeneration
a__ :Optional[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , a )
a__ :List[Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
a__ :Optional[Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(a , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(a ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
a__ :Dict = RagRetriever.from_pretrained(a , **a )
a__ :Any = model_class.from_pretrained(a , retriever=a , **a )
model.retriever.init_retrieval()
else:
a__ :int = model_class.from_pretrained(a , **a )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
a__ :str = []
for line in tqdm(a ):
questions.append(line.strip() )
if len(a ) == args.eval_batch_size:
a__ :Union[str, Any] = evaluate_batch_fn(a , a , a )
preds_file.write("\n".join(a ) + "\n" )
preds_file.flush()
a__ :Optional[Any] = []
if len(a ) > 0:
a__ :List[str] = evaluate_batch_fn(a , a , a )
preds_file.write("\n".join(a ) )
preds_file.flush()
score_fn(a , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
snake_case__ = get_args()
main(args)
| 395 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 1
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = "generated"
def __init__( self , *A_ , **A_ ) -> Dict:
super().__init__(*A_ , **A_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _a ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
__UpperCamelCase ={}
if truncation is not None:
__UpperCamelCase =truncation
__UpperCamelCase =generate_kwargs
__UpperCamelCase ={}
if return_tensors is not None and return_type is None:
__UpperCamelCase =ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__UpperCamelCase =return_type
if clean_up_tokenization_spaces is not None:
__UpperCamelCase =clean_up_tokenization_spaces
if stop_sequence is not None:
__UpperCamelCase =self.tokenizer.encode(A_ , add_special_tokens=A_ )
if len(A_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__UpperCamelCase =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self , A_ , A_ , A_ ) -> Tuple:
return True
def _a ( self , *A_ , A_ ) -> Any:
__UpperCamelCase =self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , A_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
__UpperCamelCase =([prefix + arg for arg in args[0]],)
__UpperCamelCase =True
elif isinstance(args[0] , A_ ):
__UpperCamelCase =(prefix + args[0],)
__UpperCamelCase =False
else:
raise ValueError(
f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
__UpperCamelCase =self.tokenizer(*A_ , padding=A_ , truncation=A_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A_ , **A_ ) -> Union[str, Any]:
__UpperCamelCase =super().__call__(*A_ , **A_ )
if (
isinstance(args[0] , A_ )
and all(isinstance(A_ , A_ ) for el in args[0] )
and all(len(A_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _a ( self , A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , **A_ ) -> Union[str, Any]:
__UpperCamelCase =self._parse_and_tokenize(A_ , truncation=A_ , **A_ )
return inputs
def _a ( self , A_ , **A_ ) -> str:
if self.framework == "pt":
__UpperCamelCase , __UpperCamelCase =model_inputs['input_ids'].shape
elif self.framework == "tf":
__UpperCamelCase , __UpperCamelCase =tf.shape(model_inputs['input_ids'] ).numpy()
__UpperCamelCase =generate_kwargs.get('min_length' , self.model.config.min_length )
__UpperCamelCase =generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(A_ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
__UpperCamelCase =self.model.generate(**A_ , **A_ )
__UpperCamelCase =output_ids.shape[0]
if self.framework == "pt":
__UpperCamelCase =output_ids.reshape(A_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__UpperCamelCase =tf.reshape(A_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _a ( self , A_ , A_=ReturnType.TEXT , A_=False ) -> List[str]:
__UpperCamelCase =[]
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__UpperCamelCase ={f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
__UpperCamelCase ={
f'{self.return_name}_text': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
}
records.append(A_ )
return records
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = "summary"
def __call__( self , *A_ , **A_ ) -> Union[str, Any]:
return super().__call__(*A_ , **A_ )
def _a ( self , A_ , A_ , A_ ) -> bool:
if max_length < min_length:
logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "translation"
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def _a ( self , *A_ , A_=TruncationStrategy.DO_NOT_TRUNCATE , A_=None , A_=None ) -> Dict:
if getattr(self.tokenizer , '_build_translation_inputs' , A_ ):
return self.tokenizer._build_translation_inputs(
*A_ , return_tensors=self.framework , truncation=A_ , src_lang=A_ , tgt_lang=A_ )
else:
return super()._parse_and_tokenize(*A_ , truncation=A_ )
def _a ( self , A_=None , A_=None , **A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =super()._sanitize_parameters(**A_ )
if src_lang is not None:
__UpperCamelCase =src_lang
if tgt_lang is not None:
__UpperCamelCase =tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__UpperCamelCase =kwargs.get('task' , self.task )
__UpperCamelCase =task.split('_' )
if task and len(A_ ) == 4:
# translation, XX, to YY
__UpperCamelCase =items[1]
__UpperCamelCase =items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A_ , **A_ ) -> int:
return super().__call__(*A_ , **A_ )
| 682 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 682 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCamelCase__ : str = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase)
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , **A__ ) -> Union[str, Any]:
super().__init__(**A__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
self.check_model_type(A__ )
def __call__( self , A__ , A__ = None , **A__ , ) -> Tuple:
if "text_queries" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs.pop("""text_queries""" )
if isinstance(A__ , (str, Image.Image) ):
_SCREAMING_SNAKE_CASE = {"""image""": image, """candidate_labels""": candidate_labels}
else:
_SCREAMING_SNAKE_CASE = image
_SCREAMING_SNAKE_CASE = super().__call__(A__ , **A__ )
return results
def UpperCamelCase ( self , **A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""threshold"""]
if "top_k" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""top_k"""]
return {}, {}, postprocess_params
def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = load_image(inputs["""image"""] )
_SCREAMING_SNAKE_CASE = inputs["""candidate_labels"""]
if isinstance(A__ , A__ ):
_SCREAMING_SNAKE_CASE = candidate_labels.split(""",""" )
_SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = self.tokenizer(A__ , return_tensors=self.framework )
_SCREAMING_SNAKE_CASE = self.image_processor(A__ , return_tensors=self.framework )
yield {
"is_last": i == len(A__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self , A__ ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = model_inputs.pop("""target_size""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_label""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = self.model(**A__ )
_SCREAMING_SNAKE_CASE = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def UpperCamelCase ( self , A__ , A__=0.1 , A__=None ) -> List[Any]:
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
_SCREAMING_SNAKE_CASE = model_output["""candidate_label"""]
_SCREAMING_SNAKE_CASE = BaseModelOutput(A__ )
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=A__ , threshold=A__ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
_SCREAMING_SNAKE_CASE = outputs["""scores"""][index].item()
_SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs["""boxes"""][index][0] )
_SCREAMING_SNAKE_CASE = {"""score""": score, """label""": label, """box""": box}
results.append(A__ )
_SCREAMING_SNAKE_CASE = sorted(A__ , key=lambda A__ : x["score"] , reverse=A__ )
if top_k:
_SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self , A__ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = box.int().tolist()
_SCREAMING_SNAKE_CASE = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 591 |
'''simple docstring'''
class _a :
"""simple docstring"""
def __init__( self , A__ ) -> List[Any]:
# we need a list not a string, so do something to change the type
_SCREAMING_SNAKE_CASE = arr.split(""",""" )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = [int(self.array[0] )] * len(self.array )
_SCREAMING_SNAKE_CASE = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
_SCREAMING_SNAKE_CASE = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
_SCREAMING_SNAKE_CASE = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = input("please input some numbers:")
UpperCamelCase__ : List[Any] = SubArray(whole_array)
UpperCamelCase__ : str = array.solve_sub_array()
print(("the results is:", re))
| 591 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a ( unittest.TestCase ):
def __init__( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=13 , lowerCamelCase_ : Tuple=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Tuple=99 , lowerCamelCase_ : int=32 , lowerCamelCase_ : int=5 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : int=5_12 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Any=4 , ) -> Any:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def lowerCAmelCase_ ( self : int ) -> List[Any]:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a ( A_ , unittest.TestCase ):
A_ : Union[str, Any] = True
A_ : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self : Any ) -> Optional[int]:
__a = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Any:
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Dict ) -> Optional[int]:
__a = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__a = model(lowerCamelCase_ )[0]
__a = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase_ )
# compare the actual values for a slice.
__a = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ) -> Tuple:
__a = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ )
__a = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
__a = model(lowerCamelCase_ )[0]
# compare the actual values for a slice.
__a = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 704 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class a ( A_ ):
A_ : str = '''open-llama'''
def __init__( self : Tuple , lowerCamelCase_ : Tuple=10_00_00 , lowerCamelCase_ : Union[str, Any]=40_96 , lowerCamelCase_ : Any=1_10_08 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : Tuple="silu" , lowerCamelCase_ : Dict=20_48 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : List[Any]=1E-6 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Tuple=0 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=None , **lowerCamelCase_ : int , ) -> List[Any]:
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = initializer_range
__a = rms_norm_eps
__a = use_cache
__a = kwargs.pop(
"""use_memorry_efficient_attention""" , lowerCamelCase_ )
__a = hidden_dropout_prob
__a = attention_dropout_prob
__a = use_stable_embedding
__a = shared_input_output_embedding
__a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , tie_word_embeddings=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCamelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F"""got {self.rope_scaling}""" )
__a = self.rope_scaling.get("""type""" , lowerCamelCase_ )
__a = self.rope_scaling.get("""factor""" , lowerCamelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 173 | 0 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[str] = CycleDiffusionPipeline
SCREAMING_SNAKE_CASE_: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
SCREAMING_SNAKE_CASE_: Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
SCREAMING_SNAKE_CASE_: Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
SCREAMING_SNAKE_CASE_: List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_: List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A__ = CLIPTextModel(__a )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , __a , __a=0 ):
"""simple docstring"""
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
A__ = image / 2 + 0.5
if str(__a ).startswith('mps' ):
A__ = torch.manual_seed(__a )
else:
A__ = torch.Generator(device=__a ).manual_seed(__a )
A__ = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = CycleDiffusionPipeline(**__a )
A__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
A__ = self.get_dummy_inputs(__a )
A__ = pipe(**__a )
A__ = output.images
A__ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A__ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.get_dummy_components()
for name, module in components.items():
if hasattr(__a , 'half' ):
A__ = module.half()
A__ = CycleDiffusionPipeline(**__a )
A__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
A__ = self.get_dummy_inputs(__a )
A__ = pipe(**__a )
A__ = output.images
A__ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A__ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _UpperCAmelCase ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
A__ = init_image.resize((512, 512) )
A__ = 'CompVis/stable-diffusion-v1-4'
A__ = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
A__ = CycleDiffusionPipeline.from_pretrained(
__a , scheduler=__a , safety_checker=__a , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
A__ = 'A black colored car'
A__ = 'A blue colored car'
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
A__ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
A__ = init_image.resize((512, 512) )
A__ = 'CompVis/stable-diffusion-v1-4'
A__ = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
A__ = CycleDiffusionPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
A__ = 'A black colored car'
A__ = 'A blue colored car'
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
A__ = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 554 |
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ = None ):
A__ = word_bank or []
# create a table
A__ = len(lowerCAmelCase__ ) + 1
A__ = []
for _ in range(lowerCAmelCase__ ):
table.append([] )
# seed value
A__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase__ )] == word:
A__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCAmelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase__ )]:
combination.reverse()
return table[len(lowerCAmelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 554 | 1 |
'''simple docstring'''
from math import sqrt
def _UpperCAmelCase ( __A : List[Any] ):
a_ : List[str] = 0
for i in range(1 , int(sqrt(lowerCAmelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(lowerCAmelCase_ ):
total += i + n // i
elif i == sqrt(lowerCAmelCase_ ):
total += i
return total - n
def _UpperCAmelCase ( __A : Union[str, Any] = 1_00_00 ):
a_ : str = sum(
i
for i in range(1 , lowerCAmelCase_ )
if sum_of_divisors(sum_of_divisors(lowerCAmelCase_ ) ) == i and sum_of_divisors(lowerCAmelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 466 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Optional[Any] = 256
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Union[str, Any] = ['''melgan''']
def __init__(self : Optional[Any] , A__ : SpectrogramNotesEncoder , A__ : SpectrogramContEncoder , A__ : TaFilmDecoder , A__ : DDPMScheduler , A__ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase = math.log(1e-5 ) # Matches MelGAN training.
lowercase = 4.0 # Largest value for most examples
lowercase = 1_2_8
self.register_modules(
notes_encoder=A__ , continuous_encoder=A__ , decoder=A__ , scheduler=A__ , melgan=A__ , )
def UpperCAmelCase__ (self : Union[str, Any] , A__ : Any , A__ : Tuple=(-1.0, 1.0) , A__ : Any=False ) -> Any:
lowercase , lowercase = output_range
if clip:
lowercase = torch.clip(A__ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCAmelCase__ (self : Tuple , A__ : Any , A__ : List[str]=(-1.0, 1.0) , A__ : Any=False ) -> str:
lowercase , lowercase = input_range
lowercase = torch.clip(A__ , A__ , A__ ) if clip else outputs
# Scale to [0, 1].
lowercase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCAmelCase__ (self : List[str] , A__ : Optional[int] , A__ : Optional[Any] , A__ : List[Any] ) -> Dict:
lowercase = input_tokens > 0
lowercase , lowercase = self.notes_encoder(
encoder_input_tokens=A__ , encoder_inputs_mask=A__ )
lowercase , lowercase = self.continuous_encoder(
encoder_inputs=A__ , encoder_inputs_mask=A__ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCAmelCase__ (self : int , A__ : int , A__ : Optional[int] , A__ : List[Any] ) -> str:
lowercase = noise_time
if not torch.is_tensor(A__ ):
lowercase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A__ ) and len(timesteps.shape ) == 0:
lowercase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase = self.decoder(
encodings_and_masks=A__ , decoder_input_tokens=A__ , decoder_noise_time=A__ )
return logits
@torch.no_grad()
def __call__(self : int , A__ : List[List[int]] , A__ : Optional[torch.Generator] = None , A__ : int = 1_0_0 , A__ : bool = True , A__ : str = "numpy" , A__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__ )}.' )
lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A__ , device=self.device )
for i, encoder_input_tokens in enumerate(A__ ):
if i == 0:
lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase = ones
lowercase = self.scale_features(
A__ , output_range=[-1.0, 1.0] , clip=A__ )
lowercase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A__ , continuous_mask=A__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.decode(
encodings_and_masks=A__ , input_tokens=A__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase = self.scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
lowercase = self.scale_to_features(A__ , input_range=[-1.0, 1.0] )
lowercase = mel[:1]
lowercase = mel.cpu().float().numpy()
lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ )
logger.info("Generated segment" , A__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A__ )
| 310 | 0 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _a ( __a , __a , __a , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase_ = CLIPTextModel(lowercase_ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any]=0 ):
'''simple docstring'''
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(lowercase_ )
else:
lowercase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ = 2
lowercase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , )
lowercase_ = floats_tensor(control_image.shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((64, 64) )
lowercase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _a ( __a , __a , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionControlNetImgaImgPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowercase_ : Tuple ):
if isinstance(lowercase_ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
lowercase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowercase_ )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase_ = CLIPTextModel(lowercase_ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = MultiControlNetModel([controlneta, controlneta] )
lowercase_ = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str=0 ):
'''simple docstring'''
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(lowercase_ )
else:
lowercase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ = 2
lowercase_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase_ , device=torch.device(lowercase_ ) , ),
]
lowercase_ = floats_tensor(control_image[0].shape , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((64, 64) )
lowercase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
lowercase_ = 1_0.0
lowercase_ = 4
lowercase_ = self.get_dummy_inputs(lowercase_ )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**lowercase_ )[0]
lowercase_ = self.get_dummy_inputs(lowercase_ )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**lowercase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowercase_ = self.get_dummy_inputs(lowercase_ )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**lowercase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowercase_ = self.get_dummy_inputs(lowercase_ )
lowercase_ = steps
lowercase_ = scale
lowercase_ = pipe(**lowercase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowercase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowercase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase_ , controlnet=lowercase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = """evil space-punk bird"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowercase_ = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowercase_ = pipe(
lowercase_ , lowercase_ , control_image=lowercase_ , generator=lowercase_ , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowercase_ = output.images[0]
assert image.shape == (512, 512, 3)
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9e-2
| 603 | '''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class _a ( __a ):
"""simple docstring"""
A_ = ['''pixel_values''']
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowercase_ )
lowercase_ = size if size is not None else {"""shortest_edge""": 384}
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase_ = do_resize
lowercase_ = size
# Default value set here for backwards compatibility where the value in config is None
lowercase_ = crop_pct if crop_pct is not None else 224 / 256
lowercase_ = resample
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : float , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowercase_ = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowercase_ = int(shortest_edge / crop_pct )
lowercase_ = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_ )
lowercase_ = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : List[str] , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 603 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
with open(UpperCamelCase_ ) as metadata_file:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = LukeConfig(use_entity_aware_attention=UpperCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ = torch.load(UpperCamelCase_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ = load_original_entity_vocab(UpperCamelCase_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ = AddedToken('<ent>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = AddedToken('<ent2>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = 'MLukeTokenizer'
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = MLukeTokenizer.from_pretrained(UpperCamelCase_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE__ = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE__ = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[bias_name]
SCREAMING_SNAKE_CASE__ = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE__ = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE__ = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ = LukeForMaskedLM(config=UpperCamelCase_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE__ = state_dict[key]
else:
SCREAMING_SNAKE_CASE__ = state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
if set(UpperCamelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(UpperCamelCase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ = MLukeTokenizer.from_pretrained(UpperCamelCase_ , task='entity_classification' )
SCREAMING_SNAKE_CASE__ = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE__ = (0, 9)
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 33, 768) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ = MLukeTokenizer.from_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE__ = (24, 30)
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCamelCase_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE__ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(UpperCamelCase_ ) )
model.save_pretrained(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ['[MASK]', '[PAD]', '[UNK]']
SCREAMING_SNAKE_CASE__ = [json.loads(UpperCamelCase_ ) for line in open(UpperCamelCase_ )]
SCREAMING_SNAKE_CASE__ = {}
for entry in data:
SCREAMING_SNAKE_CASE__ = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ = entity_id
break
SCREAMING_SNAKE_CASE__ = F'{language}:{entity_name}'
SCREAMING_SNAKE_CASE__ = entity_id
return new_mapping
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 472 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__snake_case = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE__ = json.loads(request.urlopen(UpperCamelCase_ ).read() )['releases'].keys()
return sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : version.Version(UpperCamelCase_ ) )
def _lowercase ( ) -> Union[str, Any]:
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = Path(UpperCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def _lowercase ( UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
init_hf_modules()
SCREAMING_SNAKE_CASE__ = Path(UpperCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def _lowercase ( UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
with open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE__ = re.findall('^\s*import\s+\.(\S+)\s*$' , UpperCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , UpperCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(UpperCamelCase_ ) )
def _lowercase ( UpperCamelCase_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = [module_file]
SCREAMING_SNAKE_CASE__ = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ = Path(UpperCamelCase_ ).parent
SCREAMING_SNAKE_CASE__ = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE__ = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE__ = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) == 0
all_relative_imports.extend(UpperCamelCase_ )
return all_relative_imports
def _lowercase ( UpperCamelCase_ ) -> Dict:
'''simple docstring'''
with open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE__ = re.findall('^\s*import\s+(\S+)\s*$' , UpperCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , UpperCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE__ = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE__ = list(set(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ = []
for imp in imports:
try:
importlib.import_module(UpperCamelCase_ )
except ImportError:
missing_packages.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(UpperCamelCase_ )}. Run `pip install {" ".join(UpperCamelCase_ )}`' )
return get_relative_imports(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE__ = importlib.import_module(UpperCamelCase_ )
if class_name is None:
return find_pipeline_class(UpperCamelCase_ )
return getattr(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE__ = dict(inspect.getmembers(UpperCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , UpperCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE__ = cls
return pipeline_class
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = str(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = module_file_or_url
SCREAMING_SNAKE_CASE__ = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE__ = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE__ = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE__ = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE__ = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE__ = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE__ = COMMUNITY_PIPELINES_URL.format(revision=UpperCamelCase_ , pipeline=UpperCamelCase_ )
try:
SCREAMING_SNAKE_CASE__ = cached_download(
UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = 'git'
SCREAMING_SNAKE_CASE__ = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE__ = hf_hub_download(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE__ = check_imports(UpperCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = Path(UpperCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE__ = F'{module_needed}.py'
shutil.copy(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE__ = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = model_info(UpperCamelCase_ , revision=UpperCamelCase_ , token=UpperCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE__ = submodule_path / commit_hash
SCREAMING_SNAKE_CASE__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCamelCase_ , F'{module_needed}.py' , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return os.path.join(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , **UpperCamelCase_ , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_cached_module_file(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return get_class_in_module(UpperCamelCase_ , final_module.replace('.py' , '' ) )
| 472 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def A(__a: str = "" ):
lowerCAmelCase_ = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
lowerCAmelCase_ = BeautifulSoup(requests.get(__a ).text , "html.parser" )
lowerCAmelCase_ = soup.find_all("td" , attrs="titleColumn" )
lowerCAmelCase_ = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__a , __a )
}
def A(__a: str = "IMDb_Top_250_Movies.csv" ):
lowerCAmelCase_ = get_imdb_top_aaa_movies()
with open(__a , "w" , newline="" ) as out_file:
lowerCAmelCase_ = csv.writer(__a )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 719 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ (unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
def __a ( self , _a ) -> Tuple:
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a , config_name=_a )
lowerCAmelCase_ = GenerationConfig.from_pretrained(_a , config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , _a )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoConfig.from_pretrained("gpt2" )
lowerCAmelCase_ = GenerationConfig.from_model_config(_a )
lowerCAmelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a , _a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = GenerationConfig()
lowerCAmelCase_ = {
"max_new_tokens": 1024,
"foo": "bar",
}
lowerCAmelCase_ = copy.deepcopy(_a )
lowerCAmelCase_ = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a , _a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a , {"foo": "bar"} )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = GenerationConfig()
lowerCAmelCase_ = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(_a )
lowerCAmelCase_ = GenerationConfig.from_pretrained(_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
lowerCAmelCase_ = GenerationConfig.from_model_config(_a )
assert not hasattr(_a , "foo" ) # no new kwargs should be initialized if from config
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
lowerCAmelCase_ = GenerationConfig.from_pretrained(_a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ (unittest.TestCase ):
@classmethod
def __a ( cls ) -> Optional[Any]:
lowerCAmelCase_ = TOKEN
HfFolder.save_token(_a )
@classmethod
def __a ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id="test-generation-config" , push_to_hub=_a , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id="valid_org/test-generation-config-org" , push_to_hub=_a , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
| 226 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str ,A : List[str] ,A : List[Any]=13 ,A : Optional[int]=7 ,A : Tuple=True ,A : Tuple=True ,A : Tuple=True ,A : Optional[Any]=True ,A : Optional[int]=99 ,A : Optional[Any]=32 ,A : int=5 ,A : List[Any]=4 ,A : Union[str, Any]=37 ,A : int="gelu" ,A : int=0.1 ,A : List[Any]=0.1 ,A : List[str]=5_12 ,A : int=16 ,A : str=2 ,A : int=0.02 ,A : Optional[Any]=4 ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_attention_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_choices
def UpperCamelCase_ ( self : Dict ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_attention_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__A = AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : Dict ):
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : List[Any] ):
__A = FlaxAlbertModelTester(self )
@slow
def UpperCamelCase_ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained("albert-base-v2" )
__A = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
__A = FlaxAlbertModel.from_pretrained("albert-base-v2" )
__A = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__A = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__A = model(A ,attention_mask=A )[0]
__A = (1, 11, 7_68)
self.assertEqual(output.shape ,A )
__A = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,A ,atol=1E-4 ) )
| 55 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Any ):
super().setUp()
__A = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Tuple ,**A : int ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
__A = "UNwant\u00E9d,running"
__A = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self : str ):
__A = self.tokenizer_class(self.vocab_file )
__A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : int ):
pass
| 55 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowerCAmelCase : str
__lowerCAmelCase : List[str]
__lowerCAmelCase : Optional[List[str]]
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowerCAmelCase : List[int]
__lowerCAmelCase : List[int]
__lowerCAmelCase : Optional[List[int]] =None
__lowerCAmelCase : Optional[List[int]] =None
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Dict ='''train'''
__lowerCAmelCase : Any ='''dev'''
__lowerCAmelCase : Any ='''test'''
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( snake_case :Any, snake_case :Union[Split, str]):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCamelCase__ ( snake_case :str):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCamelCase__ ( snake_case :List[InputExample], snake_case :List[str], snake_case :int, snake_case :PreTrainedTokenizer, snake_case :str=False, snake_case :Union[str, Any]="[CLS]", snake_case :int=1, snake_case :int="[SEP]", snake_case :str=False, snake_case :int=False, snake_case :Tuple=0, snake_case :Union[str, Any]=0, snake_case :Optional[int]=-100, snake_case :Optional[int]=0, snake_case :Union[str, Any]=True, ):
"""simple docstring"""
_lowercase ={label: i for i, label in enumerate(snake_case)}
_lowercase =[]
for ex_index, example in enumerate(snake_case):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d', snake_case, len(snake_case))
_lowercase =[]
_lowercase =[]
for word, label in zip(example.words, example.labels):
_lowercase =tokenizer.tokenize(snake_case)
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(snake_case) > 0:
tokens.extend(snake_case)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(snake_case) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_lowercase =tokenizer.num_special_tokens_to_add()
if len(snake_case) > max_seq_length - special_tokens_count:
_lowercase =tokens[: (max_seq_length - special_tokens_count)]
_lowercase =label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_lowercase =[sequence_a_segment_id] * len(snake_case)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_lowercase =[cls_token] + tokens
_lowercase =[pad_token_label_id] + label_ids
_lowercase =[cls_token_segment_id] + segment_ids
_lowercase =tokenizer.convert_tokens_to_ids(snake_case)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_lowercase =[1 if mask_padding_with_zero else 0] * len(snake_case)
# Zero-pad up to the sequence length.
_lowercase =max_seq_length - len(snake_case)
if pad_on_left:
_lowercase =([pad_token] * padding_length) + input_ids
_lowercase =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_lowercase =([pad_token_segment_id] * padding_length) + segment_ids
_lowercase =([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(snake_case) == max_seq_length
assert len(snake_case) == max_seq_length
assert len(snake_case) == max_seq_length
assert len(snake_case) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***')
logger.info('guid: %s', example.guid)
logger.info('tokens: %s', ' '.join([str(snake_case) for x in tokens]))
logger.info('input_ids: %s', ' '.join([str(snake_case) for x in input_ids]))
logger.info('input_mask: %s', ' '.join([str(snake_case) for x in input_mask]))
logger.info('segment_ids: %s', ' '.join([str(snake_case) for x in segment_ids]))
logger.info('label_ids: %s', ' '.join([str(snake_case) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
_lowercase =None
features.append(
InputFeatures(
input_ids=snake_case, attention_mask=snake_case, token_type_ids=snake_case, label_ids=snake_case))
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : List[InputFeatures]
__lowerCAmelCase : int =nn.CrossEntropyLoss().ignore_index
def __init__( self :Optional[Any], snake_case :TokenClassificationTask, snake_case :str, snake_case :PreTrainedTokenizer, snake_case :List[str], snake_case :str, snake_case :Optional[int] = None, snake_case :Union[str, Any]=False, snake_case :Split = Split.train, ):
"""simple docstring"""
_lowercase =os.path.join(
snake_case, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(snake_case)), )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowercase =cached_features_file + '.lock'
with FileLock(snake_case):
if os.path.exists(snake_case) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''')
_lowercase =torch.load(snake_case)
else:
logger.info(f'''Creating features from dataset file at {data_dir}''')
_lowercase =token_classification_task.read_examples_from_file(snake_case, snake_case)
# TODO clean up all this to leverage built-in features of tokenizers
_lowercase =token_classification_task.convert_examples_to_features(
snake_case, snake_case, snake_case, snake_case, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=snake_case, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
logger.info(f'''Saving features into cached file {cached_features_file}''')
torch.save(self.features, snake_case)
def __len__( self :Union[str, Any]):
"""simple docstring"""
return len(self.features)
def __getitem__( self :Optional[int], snake_case :List[Any]):
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowerCAmelCase : List[InputFeatures]
__lowerCAmelCase : int =-1_0_0
def __init__( self :Tuple, snake_case :TokenClassificationTask, snake_case :str, snake_case :PreTrainedTokenizer, snake_case :List[str], snake_case :str, snake_case :Optional[int] = None, snake_case :Dict=False, snake_case :Split = Split.train, ):
"""simple docstring"""
_lowercase =token_classification_task.read_examples_from_file(snake_case, snake_case)
# TODO clean up all this to leverage built-in features of tokenizers
_lowercase =token_classification_task.convert_examples_to_features(
snake_case, snake_case, snake_case, snake_case, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=snake_case, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_lowercase =tf.data.Dataset.from_generator(
snake_case, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa), (
{'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
tf.TensorShape([None]),
), )
else:
_lowercase =tf.data.Dataset.from_generator(
snake_case, ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa), (
{
'input_ids': tf.TensorShape([None]),
'attention_mask': tf.TensorShape([None]),
'token_type_ids': tf.TensorShape([None]),
},
tf.TensorShape([None]),
), )
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self :Union[str, Any]):
"""simple docstring"""
return len(self.features)
def __getitem__( self :Optional[Any], snake_case :Optional[int]):
"""simple docstring"""
return self.features[i]
| 557 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 557 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "deit"
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=768 , SCREAMING_SNAKE_CASE__ : Optional[int]=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Dict=3_072 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : str=1e-1_2 , SCREAMING_SNAKE_CASE__ : int=224 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[str]=16 , **SCREAMING_SNAKE_CASE__ : Any , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = encoder_stride
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = version.parse("1.11" )
@property
def a ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a ( self : Dict ) -> float:
return 1e-4
| 61 | '''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE : List[str] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
require_version(deps[pkg] , _UpperCAmelCase )
| 244 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
lowerCAmelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler("""sample_euler""" )
lowerCAmelCase = """A painting of a squirrel eating a burger"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self ) -> str:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowerCAmelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler("""sample_euler""" )
lowerCAmelCase = """A painting of a squirrel eating a burger"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowerCAmelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
lowerCAmelCase = """A painting of a squirrel eating a burger"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=A_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=A_ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 711 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase = parser.parse_args()
if args.model_type == "roberta":
UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase = 'roberta'
elif args.model_type == "gpt2":
UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCAmelCase = 'transformer'
UpperCAmelCase = model.state_dict()
UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCAmelCase = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCAmelCase = F'''{prefix}.embeddings.{w}.weight'''
UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCAmelCase = F'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCAmelCase = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCAmelCase = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[F'''lm_head.dense.{w}''']
UpperCAmelCase = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[F'''{prefix}.ln_f.{w}''']
UpperCAmelCase = state_dict['lm_head.weight']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 344 | 0 |
'''simple docstring'''
a : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
a : int = [{"type": "code", "content": INSTALL_CONTENT}]
a : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 640 |
'''simple docstring'''
from __future__ import annotations
def _a (lowercase__ : int , lowercase__ : int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
__snake_case = number_of_bytes // partitions
__snake_case = []
for i in range(lowercase__ ):
__snake_case = i * bytes_per_partition + 1
__snake_case = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 0
# Number of processes finished
_SCREAMING_SNAKE_CASE = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
_SCREAMING_SNAKE_CASE = [0] * no_of_process
# List to include calculation results
_SCREAMING_SNAKE_CASE = [0] * no_of_process
# Sort by arrival time.
_SCREAMING_SNAKE_CASE = [burst_time[i] for i in np.argsort(UpperCamelCase__ )]
_SCREAMING_SNAKE_CASE = [process_name[i] for i in np.argsort(UpperCamelCase__ )]
arrival_time.sort()
while no_of_process > finished_process_count:
_SCREAMING_SNAKE_CASE = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
_SCREAMING_SNAKE_CASE = arrival_time[i]
_SCREAMING_SNAKE_CASE = 0
# Index showing the location of the process being performed
_SCREAMING_SNAKE_CASE = 0
# Saves the current response ratio.
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , UpperCamelCase__ ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
_SCREAMING_SNAKE_CASE = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
_SCREAMING_SNAKE_CASE = temp
_SCREAMING_SNAKE_CASE = i
# Calculate the turn around time
_SCREAMING_SNAKE_CASE = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
_SCREAMING_SNAKE_CASE = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [0] * no_of_process
for i in range(0 , UpperCamelCase__ ):
_SCREAMING_SNAKE_CASE = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = 5
lowerCamelCase : List[Any] = ["""A""", """B""", """C""", """D""", """E"""]
lowerCamelCase : Union[str, Any] = [1, 2, 3, 4, 5]
lowerCamelCase : int = [1, 2, 3, 4, 5]
lowerCamelCase : List[Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCamelCase : Optional[Any] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 706 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __snake_case( unittest.TestCase ):
def A ( self , A_ , A_ , A_ ):
'''simple docstring'''
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
ops.enable_eager_execution_internal()
_SCREAMING_SNAKE_CASE = tf.config.list_physical_devices('''CPU''' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_SCREAMING_SNAKE_CASE = tf.config.list_logical_devices(device_type='''CPU''' )
_SCREAMING_SNAKE_CASE = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_SCREAMING_SNAKE_CASE = GradientAccumulator()
_SCREAMING_SNAKE_CASE = tf.Variable([4.0, 3.0] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = create_optimizer(5e-5 , 10 , 5 )
_SCREAMING_SNAKE_CASE = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ , A_ ):
with strategy.scope():
_SCREAMING_SNAKE_CASE = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ , A_ ):
_SCREAMING_SNAKE_CASE = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 168 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = 4_2
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : List[Any] = None
def A ( ) -> Node | None:
lowerCamelCase : List[str] = Node(1 )
lowerCamelCase : Any = Node(2 )
lowerCamelCase : int = Node(3 )
lowerCamelCase : str = Node(4 )
lowerCamelCase : List[Any] = Node(5 )
return tree
def A ( _SCREAMING_SNAKE_CASE ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def A ( _SCREAMING_SNAKE_CASE ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def A ( _SCREAMING_SNAKE_CASE ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def A ( _SCREAMING_SNAKE_CASE ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def A ( _SCREAMING_SNAKE_CASE ) -> Sequence[Node | None]:
lowerCamelCase : Tuple = []
if root is None:
return output
lowerCamelCase : List[str] = deque([root] )
while process_queue:
lowerCamelCase : int = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Sequence[Node | None]:
lowerCamelCase : Dict = []
def populate_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(UpperCamelCase_ ,UpperCamelCase_ )
return output
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Sequence[Node | None]:
lowerCamelCase : Dict = []
def populate_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(UpperCamelCase_ ,UpperCamelCase_ )
return output
def A ( _SCREAMING_SNAKE_CASE ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : str = 0
lowerCamelCase : Optional[Any] = height(UpperCamelCase_ )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(UpperCamelCase_ ,UpperCamelCase_ ) )
lowerCamelCase : Union[str, Any] = 1
else:
output.append(get_nodes_from_right_to_left(UpperCamelCase_ ,UpperCamelCase_ ) )
lowerCamelCase : Tuple = 0
return output
def A ( ) -> None: # Main function for testing.
lowerCamelCase : str = make_tree()
print(f'''In-order Traversal: {inorder(UpperCamelCase_ )}''' )
print(f'''Pre-order Traversal: {preorder(UpperCamelCase_ )}''' )
print(f'''Post-order Traversal: {postorder(UpperCamelCase_ )}''' ,"\n" )
print(f'''Height of Tree: {height(UpperCamelCase_ )}''' ,"\n" )
print("Complete Level Order Traversal: " )
print(level_order(UpperCamelCase_ ) ,"\n" )
print("Level-wise order Traversal: " )
for level in range(1 ,height(UpperCamelCase_ ) + 1 ):
print(f'''Level {level}:''' ,get_nodes_from_left_to_right(UpperCamelCase_ ,level=UpperCamelCase_ ) )
print("\nZigZag order Traversal: " )
print(zigzag(UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 311 | import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_SCREAMING_SNAKE_CASE = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_SCREAMING_SNAKE_CASE = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_SCREAMING_SNAKE_CASE = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : List[Any]=False ):
"""simple docstring"""
UpperCamelCase = compute_bleu(
reference_corpus=lowerCamelCase_ , translation_corpus=lowerCamelCase_ , max_order=lowerCamelCase_ , smooth=lowerCamelCase_ )
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 537 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> str:
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
__lowerCAmelCase : List[Any] = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
__lowerCAmelCase : List[Any] = str(bin(SCREAMING_SNAKE_CASE ) )[2:]
__lowerCAmelCase : str = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 240 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str ) -> str:
if not (isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
__lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Dict = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
__lowerCAmelCase : List[str] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
__lowerCAmelCase : Tuple = i
__lowerCAmelCase : Any = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod() | 240 | 1 |
from __future__ import annotations
from random import choice
def A__ ( __A : str ) ->int:
return choice(__lowerCAmelCase )
def A__ ( __A : Optional[int] , __A : Union[str, Any] ) ->int:
__A =random_pivot(__lowerCAmelCase )
# partition based on pivot
# linear time
__A =[e for e in lst if e < pivot]
__A =[e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__lowerCAmelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__lowerCAmelCase ) < k - 1:
return kth_number(__lowerCAmelCase , k - len(__lowerCAmelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = '''data2vec-vision'''
def __init__( self : str , _snake_case : str=768 , _snake_case : Tuple=12 , _snake_case : Any=12 , _snake_case : Optional[int]=3072 , _snake_case : Tuple="gelu" , _snake_case : Dict=0.0 , _snake_case : Any=0.0 , _snake_case : Tuple=0.02 , _snake_case : List[Any]=1E-1_2 , _snake_case : int=224 , _snake_case : List[str]=16 , _snake_case : List[str]=3 , _snake_case : Optional[int]=False , _snake_case : str=False , _snake_case : Tuple=False , _snake_case : Tuple=False , _snake_case : Any=0.1 , _snake_case : Any=0.1 , _snake_case : List[Any]=True , _snake_case : List[Any]=[3, 5, 7, 11] , _snake_case : List[Any]=[1, 2, 3, 6] , _snake_case : Tuple=True , _snake_case : str=0.4 , _snake_case : Any=256 , _snake_case : Any=1 , _snake_case : str=False , _snake_case : str=255 , **_snake_case : Dict , ):
super().__init__(**_snake_case )
__lowercase : int = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : str = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : List[str] = layer_norm_eps
__lowercase : str = image_size
__lowercase : List[str] = patch_size
__lowercase : Dict = num_channels
__lowercase : Optional[Any] = use_mask_token
__lowercase : Optional[int] = use_absolute_position_embeddings
__lowercase : Tuple = use_relative_position_bias
__lowercase : Dict = use_shared_relative_position_bias
__lowercase : List[Any] = layer_scale_init_value
__lowercase : Union[str, Any] = drop_path_rate
__lowercase : Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowercase : List[str] = out_indices
__lowercase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowercase : Dict = use_auxiliary_head
__lowercase : str = auxiliary_loss_weight
__lowercase : Union[str, Any] = auxiliary_channels
__lowercase : Dict = auxiliary_num_convs
__lowercase : Dict = auxiliary_concat_input
__lowercase : Dict = semantic_loss_ignore_index
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : int = version.parse('''1.11''' )
@property
def snake_case_ ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case_ ( self : Tuple ):
return 1E-4
| 509 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowercase : str = 6 ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : Any =None
self.create_linked_list(__lowerCAmelCase )
def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any =Node()
SCREAMING_SNAKE_CASE__ : Any =current_node
SCREAMING_SNAKE_CASE__ : Optional[int] =current_node
SCREAMING_SNAKE_CASE__ : Tuple =current_node
for _ in range(1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any =Node()
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_node
SCREAMING_SNAKE_CASE__ : Optional[Any] =previous_node
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_node
SCREAMING_SNAKE_CASE__ : str =self.front
SCREAMING_SNAKE_CASE__ : Dict =previous_node
def __magic_name__ ( self : List[str] ) -> Dict:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
self.check_can_perform_operation()
return self.front.data if self.front else None
def __magic_name__ ( self : Dict , __lowercase : Any ) -> str:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.rear.next
if self.rear:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =data
def __magic_name__ ( self : Tuple ) -> List[str]:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
SCREAMING_SNAKE_CASE__ : str =self.front.data
SCREAMING_SNAKE_CASE__ : Optional[int] =None
return data
SCREAMING_SNAKE_CASE__ : Any =self.front
SCREAMING_SNAKE_CASE__ : Any =old_front.next
SCREAMING_SNAKE_CASE__ : int =old_front.data
SCREAMING_SNAKE_CASE__ : Any =None
return data
def __magic_name__ ( self : str ) -> str:
if self.is_empty():
raise Exception('''Empty Queue''' )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class __SCREAMING_SNAKE_CASE :
def __init__( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : str =None
SCREAMING_SNAKE_CASE__ : Dict =None
SCREAMING_SNAKE_CASE__ : Tuple =None
if __name__ == "__main__":
import doctest
doctest.testmod() | 707 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''') | 665 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : List[str] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __snake_case (lowerCamelCase ):
__a = '''encodec'''
def __init__( self: Union[str, Any] , A_: Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , A_: Any=2_40_00 , A_: List[Any]=1 , A_: Optional[Any]=False , A_: Optional[int]=None , A_: int=None , A_: List[str]=1_28 , A_: Union[str, Any]=32 , A_: List[str]=1 , A_: Dict=[8, 5, 4, 2] , A_: List[Any]="weight_norm" , A_: Any=7 , A_: List[Any]=7 , A_: Tuple=3 , A_: List[str]=2 , A_: Optional[Any]=True , A_: Optional[int]="reflect" , A_: Dict=2 , A_: Union[str, Any]=2 , A_: Union[str, Any]=1.0 , A_: List[str]=10_24 , A_: str=None , A_: List[str]=True , **A_: int , ):
__lowerCamelCase = target_bandwidths
__lowerCamelCase = sampling_rate
__lowerCamelCase = audio_channels
__lowerCamelCase = normalize
__lowerCamelCase = chunk_length_s
__lowerCamelCase = overlap
__lowerCamelCase = hidden_size
__lowerCamelCase = num_filters
__lowerCamelCase = num_residual_layers
__lowerCamelCase = upsampling_ratios
__lowerCamelCase = norm_type
__lowerCamelCase = kernel_size
__lowerCamelCase = last_kernel_size
__lowerCamelCase = residual_kernel_size
__lowerCamelCase = dilation_growth_rate
__lowerCamelCase = use_causal_conv
__lowerCamelCase = pad_mode
__lowerCamelCase = compress
__lowerCamelCase = num_lstm_layers
__lowerCamelCase = trim_right_ratio
__lowerCamelCase = codebook_size
__lowerCamelCase = codebook_dim if codebook_dim is not None else hidden_size
__lowerCamelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**A_ )
@property
def __a ( self: Union[str, Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __a ( self: List[Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __a ( self: List[Any] ):
__lowerCamelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __a ( self: List[Any] ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 281 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __snake_case (lowerCamelCase ):
__a = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 281 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_a : Union[str, Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_a : Dict = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[str] = calculate_rouge(__snake_case ,__snake_case ,bootstrap_aggregation=__snake_case ,rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(__snake_case ,__snake_case )
_lowerCAmelCase : Optional[int] = calculate_rouge(__snake_case ,__snake_case ,bootstrap_aggregation=__snake_case ,rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : List[Any] = "rougeLsum"
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=[k] )[k]
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=[k] )[k]
assert score > score_no_sep
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : str = ["rouge1", "rouge2", "rougeL"]
_lowerCAmelCase : Optional[Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=__snake_case )
_lowerCAmelCase : Union[str, Any] = calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ,rouge_keys=__snake_case )
assert score_sep == score_no_sep
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : List[str] = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
_lowerCAmelCase : Optional[Any] = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case ) == calculate_rouge(__snake_case ,__snake_case ,newline_sep=__snake_case )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
_lowerCAmelCase : Optional[int] = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
_lowerCAmelCase : Any = calculate_rouge(__snake_case ,__snake_case ,rouge_keys=["""rougeLsum"""] ,newline_sep=__snake_case )["rougeLsum"]
_lowerCAmelCase : Union[str, Any] = calculate_rouge(__snake_case ,__snake_case ,rouge_keys=["""rougeLsum"""] )["rougeLsum"]
assert new_score > prev_score
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : int = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
_lowerCAmelCase : Any = calculate_rouge_path(data_dir.joinpath("""test.source""" ) ,data_dir.joinpath("""test.target""" ) )
assert isinstance(__snake_case ,__snake_case )
_lowerCAmelCase : List[str] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) ,data_dir.joinpath("""test.target""" ) ,bootstrap_aggregation=__snake_case )
assert isinstance(__snake_case ,__snake_case )
| 713 | """simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
UpperCAmelCase_ = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler("sample_euler" )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=__lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCAmelCase_ = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler("sample_euler" )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=__lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCAmelCase_ = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=__lowercase , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __UpperCamelCase ( lowercase__ : str, lowercase__ : str ):
'''simple docstring'''
__lowercase ={
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
__lowercase =bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__lowercase =BERTEncoder(
attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=lowercase__, output_all_encodings=lowercase__, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu' ), layer_norm_eps=predefined_args.get('layer_norm_eps', lowercase__ ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__lowercase ='openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__lowercase =os.path.join(get_home_dir(), 'models' )
__lowercase =_load_vocab(lowercase__, lowercase__, lowercase__, cls=lowercase__ )
__lowercase =nlp.model.BERTModel(
lowercase__, len(lowercase__ ), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=lowercase__, use_token_type_embed=lowercase__, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=lowercase__, use_decoder=lowercase__, )
original_bort.load_parameters(lowercase__, cast_dtype=lowercase__, ignore_extra=lowercase__ )
__lowercase =original_bort._collect_params_with_prefix()
# Build our config 🤗
__lowercase ={
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowercase__ ),
}
__lowercase =BertConfig.from_dict(lowercase__ )
__lowercase =BertForMaskedLM(lowercase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowercase__ : Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowercase__ : Optional[Any], lowercase__ : Any ):
__lowercase =hf_param.shape
__lowercase =to_torch(params[gluon_param] )
__lowercase =gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__lowercase =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight' )
__lowercase =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight' )
__lowercase =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta' )
__lowercase =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__lowercase =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__lowercase =hf_bort_model.bert.encoder.layer[i]
# self attention
__lowercase =layer.attention.self
__lowercase =check_and_map_params(
self_attn.key.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__lowercase =check_and_map_params(
self_attn.key.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__lowercase =check_and_map_params(
self_attn.query.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__lowercase =check_and_map_params(
self_attn.query.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__lowercase =check_and_map_params(
self_attn.value.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__lowercase =check_and_map_params(
self_attn.value.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__lowercase =layer.attention.output
__lowercase =check_and_map_params(
self_output.dense.bias, F'''encoder.transformer_cells.{i}.proj.bias''' )
__lowercase =check_and_map_params(
self_output.dense.weight, F'''encoder.transformer_cells.{i}.proj.weight''' )
__lowercase =check_and_map_params(
self_output.LayerNorm.bias, F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__lowercase =check_and_map_params(
self_output.LayerNorm.weight, F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__lowercase =layer.intermediate
__lowercase =check_and_map_params(
intermediate.dense.bias, F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__lowercase =check_and_map_params(
intermediate.dense.weight, F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__lowercase =layer.output
__lowercase =check_and_map_params(
bert_output.dense.bias, F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__lowercase =check_and_map_params(
bert_output.dense.weight, F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__lowercase =check_and_map_params(
bert_output.LayerNorm.bias, F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__lowercase =check_and_map_params(
bert_output.LayerNorm.weight, F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__lowercase =RobertaTokenizer.from_pretrained('roberta-base' )
__lowercase =tokenizer.encode_plus(lowercase__ )['input_ids']
# Get gluon output
__lowercase =mx.nd.array([input_ids] )
__lowercase =original_bort(inputs=lowercase__, token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase__ )
__lowercase =BertModel.from_pretrained(lowercase__ )
hf_bort_model.eval()
__lowercase =tokenizer.encode_plus(lowercase__, return_tensors='pt' )
__lowercase =hf_bort_model(**lowercase__ )[0]
__lowercase =output_gluon[0].asnumpy()
__lowercase =output_hf[0].detach().numpy()
__lowercase =np.max(np.abs(hf_layer - gluon_layer ) ).item()
__lowercase =np.allclose(lowercase__, lowercase__, atol=1E-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:', lowercase__ )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 119 | 0 |
import pytest
lowerCamelCase :List[Any] = '__dummy_dataset1__'
lowerCamelCase :List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def __snake_case ( ) -> List[str]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __snake_case ( ) -> Tuple:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
_a = dataset_loading_script_name
_a = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=_UpperCamelCase )
_a = script_dir / f"{script_name}.py"
with open(_UpperCamelCase , '''w''' ) as f:
f.write(_UpperCamelCase )
return str(_UpperCamelCase )
| 346 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCamelCase ) -> bytes:
if len(_UpperCamelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __snake_case ( _UpperCamelCase ) -> bytes:
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''08x''' )[-8:]
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __snake_case ( _UpperCamelCase ) -> bytes:
_a = b''''''
for char in message:
bit_string += format(_UpperCamelCase , '''08b''' ).encode('''utf-8''' )
_a = format(len(_UpperCamelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCamelCase ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCamelCase ) -> Generator[list[int], None, None]:
if len(_UpperCamelCase ) % 5_12 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCamelCase ) , 5_12 ):
_a = bit_string[pos : pos + 5_12]
_a = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCamelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
_a = format(_UpperCamelCase , '''032b''' )
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCamelCase , 2 )
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return (a + b) % 2**32
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __snake_case ( _UpperCamelCase ) -> bytes:
_a = preprocess(_UpperCamelCase )
_a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a = 0X67_45_23_01
_a = 0XEF_CD_AB_89
_a = 0X98_BA_DC_FE
_a = 0X10_32_54_76
_a = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCamelCase ):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(_UpperCamelCase ))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(_UpperCamelCase , left_rotate_aa(_UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = sum_aa(_UpperCamelCase , _UpperCamelCase )
_a = reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ = logging.getLogger(__name__)
A__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
A__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__lowerCamelCase )} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a :
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
__lowerCAmelCase : bool = field(
default=__lowerCamelCase , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
__lowerCAmelCase : bool = field(
default=__lowerCamelCase , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
__lowerCAmelCase : bool = field(default=__lowerCamelCase , metadata={"""help""": """Whether ot not to use whole word mask."""} )
__lowerCAmelCase : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
__lowerCAmelCase : float = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
__lowerCAmelCase : int = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
__lowerCAmelCase : int = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
__lowerCAmelCase : bool = field(
default=__lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , ) -> str:
"""simple docstring"""
def _dataset(__lowerCAmelCase , __lowerCAmelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=__lowerCAmelCase , file_path=__lowerCAmelCase , block_size=args.block_size , ref_path=__lowerCAmelCase , )
return LineByLineTextDataset(tokenizer=__lowerCAmelCase , file_path=__lowerCAmelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__lowerCAmelCase , file_path=__lowerCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__lowerCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__lowerCAmelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case__ , snake_case__ , snake_case__ : int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
snake_case__ : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
snake_case__ : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
snake_case__ : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
snake_case__ : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
snake_case__ : Tuple = AutoModelWithLMHead.from_config(__lowerCAmelCase )
model.resize_token_embeddings(len(__lowerCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
snake_case__ : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
snake_case__ : Tuple = min(data_args.block_size , tokenizer.max_len )
# Get datasets
snake_case__ : List[Any] = (
get_dataset(__lowerCAmelCase , tokenizer=__lowerCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
snake_case__ : Optional[int] = (
get_dataset(__lowerCAmelCase , tokenizer=__lowerCAmelCase , evaluate=__lowerCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
snake_case__ : int = DataCollatorForPermutationLanguageModeling(
tokenizer=__lowerCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
snake_case__ : str = DataCollatorForWholeWordMask(
tokenizer=__lowerCAmelCase , mlm_probability=data_args.mlm_probability )
else:
snake_case__ : Any = DataCollatorForLanguageModeling(
tokenizer=__lowerCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case__ : List[str] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , data_collator=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , prediction_loss_only=__lowerCAmelCase , )
# Training
if training_args.do_train:
snake_case__ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__lowerCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Optional[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ : Optional[Any] = trainer.evaluate()
snake_case__ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
snake_case__ : List[Any] = {'''perplexity''': perplexity}
snake_case__ : Dict = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , __lowerCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(__lowerCAmelCase )
return results
def _lowerCAmelCase ( __lowerCAmelCase ) -> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 252 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {'''vocab_file''': '''spiece.model'''}
A__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
A__ = '''▁'''
class a ( __lowerCamelCase ):
__lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self :Union[str, Any] ,__lowercase :Dict ,__lowercase :Dict="</s>" ,__lowercase :List[Any]="<unk>" ,__lowercase :Optional[int]="<pad>" ,__lowercase :List[Any]=1_0_0 ,__lowercase :Dict=None ,__lowercase :Optional[Dict[str, Any]] = None ,__lowercase :Union[str, Any]=True ,**__lowercase :Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ : str = [F"""<extra_id_{i}>""" for i in range(__lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ : str = len(set(filter(lambda __lowercase : bool('''extra_id''' in str(__lowercase ) ) ,__lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
snake_case__ : int = legacy
snake_case__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowercase ,unk_token=__lowercase ,pad_token=__lowercase ,extra_ids=__lowercase ,additional_special_tokens=__lowercase ,sp_model_kwargs=self.sp_model_kwargs ,legacy=__lowercase ,**__lowercase ,)
snake_case__ : Tuple = vocab_file
snake_case__ : Union[str, Any] = extra_ids
snake_case__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
@staticmethod
def __lowerCamelCase ( __lowercase :Optional[Any] ,__lowercase :Any ,__lowercase :Optional[int] ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case__ : Optional[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,__lowercase ,)
return max_model_length
@property
def __lowerCamelCase ( self :Tuple ):
return self.sp_model.get_piece_size() + self._extra_ids
def __lowerCamelCase ( self :int ):
snake_case__ : List[str] = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self :int ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ,__lowercase :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase ,token_ids_a=__lowercase ,already_has_special_tokens=__lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__lowercase )) + [1]
return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]
def __lowerCamelCase ( self :Optional[Any] ):
return list(
set(filter(lambda __lowercase : bool(re.search(r'''<extra_id_\d+>''' ,__lowercase ) ) is not None ,self.additional_special_tokens ) ) )
def __lowerCamelCase ( self :str ):
return [self._convert_token_to_id(__lowercase ) for token in self.get_sentinel_tokens()]
def __lowerCamelCase ( self :str ,__lowercase :List[int] ):
if len(__lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCamelCase ( self :Dict ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ):
snake_case__ : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCamelCase ( self :int ,__lowercase :List[int] ,__lowercase :Optional[List[int]] = None ):
snake_case__ : Tuple = self._add_eos_if_not_present(__lowercase )
if token_ids_a is None:
return token_ids_a
else:
snake_case__ : Tuple = self._add_eos_if_not_present(__lowercase )
return token_ids_a + token_ids_a
def __getstate__( self :Any ):
snake_case__ : str = self.__dict__.copy()
snake_case__ : int = None
return state
def __setstate__( self :Optional[int] ,__lowercase :str ):
snake_case__ : Any = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
snake_case__ : Any = {}
snake_case__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self :Dict ,__lowercase :"TextInput" ,**__lowercase :List[Any] ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
snake_case__ : List[str] = SPIECE_UNDERLINE + text.replace(__lowercase ,''' ''' )
return super().tokenize(__lowercase ,**__lowercase )
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,**__lowercase :int ):
if not self.legacy:
snake_case__ : Union[str, Any] = text.startswith(__lowercase )
if is_first:
snake_case__ : Union[str, Any] = text[1:]
snake_case__ : Optional[Any] = self.sp_model.encode(__lowercase ,out_type=__lowercase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(__lowercase ):
snake_case__ : Tuple = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __lowerCamelCase ( self :List[Any] ,__lowercase :Dict ):
if token.startswith('''<extra_id_''' ):
snake_case__ : str = re.match(r'''<extra_id_(\d+)>''' ,__lowercase )
snake_case__ : int = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :Any ):
if index < self.sp_model.get_piece_size():
snake_case__ : List[Any] = self.sp_model.IdToPiece(__lowercase )
else:
snake_case__ : Tuple = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def __lowerCamelCase ( self :List[Any] ,__lowercase :List[str] ):
snake_case__ : Optional[int] = []
snake_case__ : int = ''''''
snake_case__ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
snake_case__ : List[Any] = True
snake_case__ : Optional[int] = []
else:
current_sub_tokens.append(__lowercase )
snake_case__ : Optional[Any] = False
out_string += self.sp_model.decode(__lowercase )
return out_string.strip()
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[str] = None ):
if not os.path.isdir(__lowercase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : Any = os.path.join(
__lowercase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase ,'''wb''' ) as fi:
snake_case__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
| 252 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def a ( __a , __a=False ) -> int:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ :Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def a ( __a , __a , __a=False ) -> Tuple:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ :Any = ''''''
else:
UpperCamelCase__ :List[str] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
UpperCamelCase__ :Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :int = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ :int = in_proj_bias[: config.hidden_size]
UpperCamelCase__ :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ :Tuple = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ :str = in_proj_bias[-config.hidden_size :]
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__a , __a )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = dct.pop(__a )
UpperCamelCase__ :Any = val
def a ( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ :int = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def a ( __a , __a , __a=False ) -> int:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=__a , )
UpperCamelCase__ :List[Any] = ViTHybridConfig(backbone_config=__a , image_size=384 , num_labels=1000 )
UpperCamelCase__ :Tuple = False
# load original model from timm
UpperCamelCase__ :Dict = timm.create_model(__a , pretrained=__a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ :Tuple = timm_model.state_dict()
if base_model:
remove_classification_head_(__a )
UpperCamelCase__ :List[str] = create_rename_keys(__a , __a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
read_in_q_k_v(__a , __a , __a )
UpperCamelCase__ :Dict = '''huggingface/label-files'''
UpperCamelCase__ :str = '''imagenet-1k-id2label.json'''
UpperCamelCase__ :Dict = json.load(open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ :Any = {int(__a ): v for k, v in idalabel.items()}
UpperCamelCase__ :str = idalabel
UpperCamelCase__ :Any = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCamelCase__ :List[Any] = ViTHybridModel(__a ).eval()
else:
UpperCamelCase__ :Optional[int] = ViTHybridForImageClassification(__a ).eval()
model.load_state_dict(__a )
# create image processor
UpperCamelCase__ :str = create_transform(**resolve_data_config({} , model=__a ) )
UpperCamelCase__ :Optional[Any] = transform.transforms
UpperCamelCase__ :Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
UpperCamelCase__ :str = ViTHybridImageProcessor(
do_resize=__a , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__a , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCamelCase__ :List[Any] = prepare_img()
UpperCamelCase__ :int = transform(__a ).unsqueeze(0 )
UpperCamelCase__ :Tuple = processor(__a , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__a , __a )
# verify logits
with torch.no_grad():
UpperCamelCase__ :Optional[Any] = model(__a )
UpperCamelCase__ :Tuple = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
UpperCamelCase__ :List[str] = timm_model.forward_features(__a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__a , outputs.pooler_output , atol=1e-3 )
else:
UpperCamelCase__ :List[Any] = timm_model(__a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__a , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__a ).mkdir(exist_ok=__a )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__a )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__a )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
__snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 720 |
'''simple docstring'''
import sys
__snake_case = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
for digit in s:
product *= int(__a )
return product
def a ( __a = N ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = -sys.maxsize - 1
UpperCamelCase__ :Dict = n[:13]
UpperCamelCase__ :Dict = 13
while cur_index < len(__a ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
UpperCamelCase__ :Dict = substr[1:] + n[cur_index]
cur_index += 1
else:
UpperCamelCase__ :List[str] = max(__a , str_eval(__a ) )
UpperCamelCase__ :Union[str, Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""") | 280 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__snake_case = {'''UserAgent''': UserAgent().random}
def _A ( _lowercase ) -> dict:
"""simple docstring"""
__UpperCamelCase = script.contents[0]
__UpperCamelCase = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __lowerCamelCase :
def __init__( self: Tuple,A_: int ):
'''simple docstring'''
__UpperCamelCase = F'''https://www.instagram.com/{username}/'''
__UpperCamelCase = self.get_json()
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = requests.get(self.url,headers=A_ ).text
__UpperCamelCase = BeautifulSoup(A_,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: List[Any] ):
'''simple docstring'''
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: str ):
'''simple docstring'''
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return self.user_data["username"]
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return self.user_data["biography"]
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def snake_case_ ( self: str ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def snake_case_ ( self: Tuple ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def snake_case_ ( self: Dict ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def snake_case_ ( self: int ):
'''simple docstring'''
return self.user_data["is_private"]
def _A ( _lowercase = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__UpperCamelCase = InstagramUser(_lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = InstagramUser('''github''')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 1 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _snake_case ( A , A , A , A ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _snake_case ( A ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _snake_case ( A ) -> Matrix | None:
if location := find_empty_location(A ):
lowerCAmelCase__ , lowerCAmelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A , A , A , A ):
lowerCAmelCase__ = digit
if sudoku(A ) is not None:
return grid
lowerCAmelCase__ = 0
return None
def _snake_case ( A ) -> None:
for row in grid:
for cell in row:
print(A , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
from __future__ import annotations
from math import pow, sqrt
def a__ ( _UpperCamelCase : float ,_UpperCamelCase : float ,_UpperCamelCase : float ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(_UpperCamelCase ,2 ) - pow(_UpperCamelCase ,2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_UpperCamelCase ,2 ) - pow(_UpperCamelCase ,2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_UpperCamelCase ,2 ) + pow(_UpperCamelCase ,2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def a__ ( A__, A__=(), A__=None, A__="no", A__="29500" ):
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : int = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : Optional[int] = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : List[str] = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : str = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', __lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Any = 8
SCREAMING_SNAKE_CASE_ : List[str] = PrepareForLaunch(__lowercase, distributed_type='TPU' )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__lowercase, args=__lowercase, nprocs=__lowercase, start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*__lowercase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase, master_addr='127.0.01', master_port=__lowercase, mixed_precision=__lowercase ):
SCREAMING_SNAKE_CASE_ : Any = PrepareForLaunch(__lowercase, distributed_type='MULTI_GPU' )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__lowercase, args=__lowercase, nprocs=__lowercase, start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : str = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*__lowercase )
def a__ ( A__, A__=(), A__=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=__lowercase, master_addr='127.0.01', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = PrepareForLaunch(__lowercase, debug=__lowercase )
start_processes(__lowercase, args=__lowercase, nprocs=__lowercase, start_method='fork' )
| 101 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
__SCREAMING_SNAKE_CASE = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
__SCREAMING_SNAKE_CASE = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class a__ ( A__ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = FunnelTokenizer
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = 2
def __init__( self :str , _lowerCamelCase :int=None , _lowerCamelCase :List[Any]=None , _lowerCamelCase :Dict=True , _lowerCamelCase :Optional[Any]="<unk>" , _lowerCamelCase :Optional[Any]="<sep>" , _lowerCamelCase :List[Any]="<pad>" , _lowerCamelCase :List[Any]="<cls>" , _lowerCamelCase :Union[str, Any]="<mask>" , _lowerCamelCase :Any="<s>" , _lowerCamelCase :Union[str, Any]="</s>" , _lowerCamelCase :str=True , _lowerCamelCase :Dict=True , _lowerCamelCase :Tuple=None , _lowerCamelCase :Tuple="##" , **_lowerCamelCase :int , ):
'''simple docstring'''
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , clean_text=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , wordpieces_prefix=_lowerCamelCase , **_lowerCamelCase , )
UpperCamelCase_ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCamelCase ) != tokenize_chinese_chars
):
UpperCamelCase_ : int =getattr(_lowerCamelCase , normalizer_state.pop('type' ) )
UpperCamelCase_ : Dict =do_lower_case
UpperCamelCase_ : Tuple =strip_accents
UpperCamelCase_ : int =tokenize_chinese_chars
UpperCamelCase_ : Tuple =normalizer_class(**_lowerCamelCase )
UpperCamelCase_ : Dict =do_lower_case
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Optional[int]=None ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =[self.sep_token_id]
UpperCamelCase_ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ):
'''simple docstring'''
UpperCamelCase_ : int =self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
| 357 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( UpperCamelCase__ :int , UpperCamelCase__ :int ) -> str:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
snake_case__ : Union[str, Any] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(UpperCamelCase__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 574 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = 'ClapFeatureExtractor'
A__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : Optional[int] , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[str] = kwargs.pop('''sampling_rate''' , __UpperCamelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
snake_case__ : List[str] = self.tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if audios is not None:
snake_case__ : List[Any] = self.feature_extractor(
__UpperCamelCase , sampling_rate=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and audios is not None:
snake_case__ : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def lowerCAmelCase ( self : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase ( self : str , *__UpperCamelCase : Tuple , **__UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.tokenizer.model_input_names
snake_case__ : Optional[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 574 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowercase= []
for i in range(lowercase__ ):
__lowercase= i / num_diffusion_timesteps
__lowercase= (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class A ( A_ , A_ ):
UpperCamelCase_ : Tuple =[e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase_ : Tuple =2
@register_to_config
def __init__(self , lowerCAmelCase = 1_0_0_0 , lowerCAmelCase = 0.0_00_85 , lowerCAmelCase = 0.0_12 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "epsilon" , lowerCAmelCase = "linspace" , lowerCAmelCase = 0 , ):
if trained_betas is not None:
__lowercase= torch.tensor(lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowercase= torch.linspace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase= (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase= betas_for_alpha_bar(lowerCAmelCase )
else:
raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )
__lowercase= 1.0 - self.betas
__lowercase= torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
if schedule_timesteps is None:
__lowercase= self.timesteps
__lowercase= (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowercase= 1 if len(lowerCAmelCase ) > 1 else 0
else:
__lowercase= timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
__lowercase= self._index_counter[timestep_int]
return indices[pos].item()
@property
def _A (self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _A (self , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.index_for_timestep(lowerCAmelCase )
if self.state_in_first_order:
__lowercase= self.sigmas[step_index]
else:
__lowercase= self.sigmas_interpol[step_index]
__lowercase= sample / ((sigma**2 + 1) ** 0.5)
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , ):
__lowercase= num_inference_steps
__lowercase= num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowercase= np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase , dtype=lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowercase= num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase= (np.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowercase= num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase= (np.arange(lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowercase= np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowercase= torch.from_numpy(np.log(lowerCAmelCase ) ).to(lowerCAmelCase )
__lowercase= np.interp(lowerCAmelCase , np.arange(0 , len(lowerCAmelCase ) ) , lowerCAmelCase )
__lowercase= np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowercase= torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase )
# interpolate sigmas
__lowercase= sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowercase= torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowercase= torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCAmelCase ).startswith('mps' ):
# mps does not support float64
__lowercase= torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase , dtype=torch.floataa )
else:
__lowercase= torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase )
# interpolate timesteps
__lowercase= self.sigma_to_t(lowerCAmelCase ).to(lowerCAmelCase , dtype=timesteps.dtype )
__lowercase= torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowercase= torch.cat([timesteps[:1], interleaved_timesteps] )
__lowercase= None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowercase= defaultdict(lowerCAmelCase )
def _A (self , lowerCAmelCase ):
# get log sigma
__lowercase= sigma.log()
# get distribution
__lowercase= log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowercase= dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowercase= low_idx + 1
__lowercase= self.log_sigmas[low_idx]
__lowercase= self.log_sigmas[high_idx]
# interpolate sigmas
__lowercase= (low - log_sigma) / (low - high)
__lowercase= w.clamp(0 , 1 )
# transform interpolation to time range
__lowercase= (1 - w) * low_idx + w * high_idx
__lowercase= t.view(sigma.shape )
return t
@property
def _A (self ):
return self.sample is None
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , ):
__lowercase= self.index_for_timestep(lowerCAmelCase )
# advance index counter by 1
__lowercase= timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowercase= self.sigmas[step_index]
__lowercase= self.sigmas_interpol[step_index + 1]
__lowercase= self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowercase= self.sigmas[step_index - 1]
__lowercase= self.sigmas_interpol[step_index]
__lowercase= self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowercase= 0
__lowercase= sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowercase= sigma_hat if self.state_in_first_order else sigma_interpol
__lowercase= sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowercase= sigma_hat if self.state_in_first_order else sigma_interpol
__lowercase= model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowercase= (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowercase= sigma_interpol - sigma_hat
# store for 2nd order step
__lowercase= sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowercase= (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowercase= sigma_next - sigma_hat
__lowercase= self.sample
__lowercase= None
__lowercase= sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase ):
# mps does not support float64
__lowercase= self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowercase= timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowercase= self.timesteps.to(original_samples.device )
__lowercase= timesteps.to(original_samples.device )
__lowercase= [self.index_for_timestep(lowerCAmelCase , lowerCAmelCase ) for t in timesteps]
__lowercase= sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowercase= sigma.unsqueeze(-1 )
__lowercase= original_samples + noise * sigma
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
| 230 |
import os
import sys
import unittest
lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= get_test_to_tester_mapping(lowerCAmelCase )
__lowercase= get_test_to_tester_mapping(lowerCAmelCase )
__lowercase= {'BertModelTest': 'BertModelTester'}
__lowercase= {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_test_mapping(lowerCAmelCase )
__lowercase= get_model_to_test_mapping(lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= get_model_to_tester_mapping(lowerCAmelCase )
__lowercase= get_model_to_tester_mapping(lowerCAmelCase )
__lowercase= {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__lowercase= {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(get_test_info.to_json(lowerCAmelCase ) , lowerCAmelCase )
| 230 | 1 |
from collections import defaultdict
def __lowerCamelCase ( A__ : str , A__ : str ) -> bool:
lowerCamelCase_ : str = first_str.lower().strip()
lowerCamelCase_ : Union[str, Any] = second_str.lower().strip()
# Remove whitespace
lowerCamelCase_ : List[Any] = first_str.replace(""" """ , """""" )
lowerCamelCase_ : List[str] = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(A__ ) != len(A__ ):
return False
# Default values for count should be 0
lowerCamelCase_ : defaultdict[str, int] = defaultdict(A__ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(A__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case__ : Tuple = input('Enter the first string ').strip()
snake_case__ : Tuple = input('Enter the second string ').strip()
snake_case__ : Any = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 171 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = ""
_a = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Any , __a : Optional[DatasetInfo] = None , __a : Optional[str] = None , **__a : Any , ) ->Any:
super().__init__(self , **__a )
lowerCamelCase_ : Tuple = repo_info
lowerCamelCase_ : Any = token
lowerCamelCase_ : Any = None
def _lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
if self.dir_cache is None:
lowerCamelCase_ : Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase_ : int = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__a ): {"""name""": str(__a ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _lowerCAmelCase ( self : int , __a : str , __a : str = "rb" , **__a : Optional[Any] , ) ->Dict:
if not isinstance(self.repo_info , __a ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
lowerCamelCase_ : int = hf_hub_url(self.repo_info.id , __a , revision=self.repo_info.sha )
return fsspec.open(
__a , mode=__a , headers=get_authentication_headers_for_url(__a , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _lowerCAmelCase ( self : Dict , __a : str , **__a : List[Any] ) ->List[Any]:
self._get_dirs()
lowerCamelCase_ : Tuple = self._strip_protocol(__a )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__a )
def _lowerCAmelCase ( self : Any , __a : Optional[Any] , __a : str=False , **__a : List[str] ) ->List[Any]:
self._get_dirs()
lowerCamelCase_ : Optional[Any] = PurePosixPath(path.strip("""/""" ) )
lowerCamelCase_ : Dict = {}
for p, f in self.dir_cache.items():
lowerCamelCase_ : str = PurePosixPath(p.strip("""/""" ) )
lowerCamelCase_ : Dict = p.parent
if root == path:
lowerCamelCase_ : int = f
lowerCamelCase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 171 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any = logging.get_logger(__name__)
_A : Any = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( __snake_case ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = """switch_transformers"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE : Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_21_28 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : List[Any]=64 , SCREAMING_SNAKE_CASE__ : List[str]=20_48 , SCREAMING_SNAKE_CASE__ : Any=64 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=0.0_1 , SCREAMING_SNAKE_CASE__ : List[str]="float32" , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 , SCREAMING_SNAKE_CASE__ : str=1_28 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=0.0_0_1 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_0_1 , SCREAMING_SNAKE_CASE__ : List[Any]=1.0 , SCREAMING_SNAKE_CASE__ : Tuple="relu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[Any]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = d_kv
__lowerCAmelCase = d_ff
__lowerCAmelCase = num_sparse_encoder_layers
__lowerCAmelCase = num_layers
__lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
__lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
__lowerCAmelCase = num_heads
__lowerCAmelCase = num_experts
__lowerCAmelCase = expert_capacity
__lowerCAmelCase = router_bias
__lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" )
__lowerCAmelCase = router_dtype
__lowerCAmelCase = router_ignore_padding_tokens
__lowerCAmelCase = relative_attention_num_buckets
__lowerCAmelCase = relative_attention_max_distance
__lowerCAmelCase = dropout_rate
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = feed_forward_proj
__lowerCAmelCase = use_cache
__lowerCAmelCase = add_router_probs
__lowerCAmelCase = router_z_loss_coef
__lowerCAmelCase = router_aux_loss_coef
__lowerCAmelCase = self.feed_forward_proj.split("""-""" )
__lowerCAmelCase = act_info[-1]
__lowerCAmelCase = act_info[0] == """gated"""
if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""\'gated-gelu\' or \'relu\'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowerCAmelCase = """gelu_new"""
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 427 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
__A = DiTPipeline
__A = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__A = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
__A = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__A = False
def UpperCamelCase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase_ , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=10_00 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=lowerCAmelCase_ , )
UpperCAmelCase_ = AutoencoderKL()
UpperCAmelCase_ = DDIMScheduler()
UpperCAmelCase_ = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=0 ) -> Optional[int]:
if str(lowerCAmelCase_ ).startswith('''mps''' ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase_ )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase_ = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ = pipe(**lowerCAmelCase_ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCAmelCase_ = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1e-3 )
def UpperCamelCase ( self : Dict ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase_ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase ( self : Optional[int] ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Dict ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[str] ) -> Dict:
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
UpperCAmelCase_ = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
UpperCAmelCase_ = pipe.get_label_ids(lowerCAmelCase_ )
UpperCAmelCase_ = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def UpperCamelCase ( self : str ) -> Union[str, Any]:
UpperCAmelCase_ = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
UpperCAmelCase_ = ['''vase''', '''umbrella''']
UpperCAmelCase_ = pipe.get_label_ids(lowerCAmelCase_ )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 121 | 0 |
"""simple docstring"""
import os
_UpperCamelCase : Dict = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
'''simple docstring'''
lowercase = 0
lowercase = 0
while index < len(__snake_case ) - 1:
lowercase = SYMBOLS[numerals[index]]
lowercase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
lowercase = ''
lowercase = num // 10_00
numerals += m_count * "M"
num %= 10_00
lowercase = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
lowercase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _SCREAMING_SNAKE_CASE ( __snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
lowercase = 0
with open(os.path.dirname(__snake_case ) + roman_numerals_filename ) as filea:
lowercase = filea.readlines()
for line in lines:
lowercase = line.strip()
lowercase = parse_roman_numerals(__snake_case )
lowercase = generate_roman_numerals(__snake_case )
savings += len(__snake_case ) - len(__snake_case )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 134 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Dict = torch.device('cpu')
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
lowercase = dct.pop(__snake_case )
lowercase = val
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
'''simple docstring'''
lowercase = []
for k in state_dict.keys():
lowercase = k
if ".pwconv" in k:
lowercase = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
lowercase = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
lowercase = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
lowercase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
lowercase = k_new.split('.' )
if ls[2].isdigit():
lowercase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
lowercase = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : List[str] ):
'''simple docstring'''
lowercase = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
lowercase = 10_00
lowercase = 'huggingface/label-files'
lowercase = 'imagenet-1k-id2label.json'
lowercase = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
lowercase = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowercase = [3, 3, 6, 4]
lowercase = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
lowercase = [3, 3, 9, 6]
lowercase = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
lowercase = [4, 3, 10, 5]
lowercase = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
lowercase = [4, 4, 12, 6]
lowercase = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
lowercase = torch.hub.load_state_dict_from_url(__snake_case , map_location='cpu' , check_hash=__snake_case )
else:
lowercase = torch.load(__snake_case , map_location='cpu' )
lowercase = checkpoint
lowercase = create_rename_keys(__snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
lowercase = SwiftFormerForImageClassification(__snake_case ).eval()
hf_model.load_state_dict(__snake_case )
# prepare test inputs
lowercase = prepare_img()
lowercase = ViTImageProcessor.from_pretrained('preprocessor_config' )
lowercase = processor(images=__snake_case , return_tensors='pt' )
# compare outputs from both models
lowercase = get_expected_output(__snake_case )
lowercase = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , __snake_case , atol=1e-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 134 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
@wraps(_lowercase )
def _inner_fn(*_lowerCAmelCase : List[str] , **_lowerCAmelCase : Dict ):
warnings.warn(
(f"\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.") , _lowercase , )
return fn(*_lowercase , **_lowercase )
return _inner_fn
| 454 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
return np.maximum(0 , _lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""vqvae"""]
def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int:
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase )
def __magic_name__ ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowercase ) else 10_00
@torch.no_grad()
def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowercase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =noise
SCREAMING_SNAKE_CASE__ : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample(
generator=__lowercase )[0]
SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowercase ):
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample''']
else:
SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample''']
if isinstance(self.scheduler , __lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample''']
else:
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step(
model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' )
SCREAMING_SNAKE_CASE__ : Any =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) )
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowercase )
self.scheduler.set_timesteps(__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1
SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : int =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase ) | 665 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __magic_name__ :
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase: str = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase: List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase: Tuple = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase: int = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase: Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase: Dict = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase: str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase: Optional[int] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase: Union[str, Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase: Tuple = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_UpperCamelCase: str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Dict = self.get_dummy_components()
_UpperCamelCase: Dict = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_UpperCamelCase: Union[str, Any] = self.get_dummy_inputs(_lowercase )
_UpperCamelCase: Optional[Any] = inputs['''prompt''']
_UpperCamelCase: str = inputs['''generator''']
_UpperCamelCase: str = inputs['''num_inference_steps''']
_UpperCamelCase: Dict = inputs['''output_type''']
if "image" in inputs:
_UpperCamelCase: List[str] = inputs['''image''']
else:
_UpperCamelCase: Union[str, Any] = None
if "mask_image" in inputs:
_UpperCamelCase: str = inputs['''mask_image''']
else:
_UpperCamelCase: str = None
if "original_image" in inputs:
_UpperCamelCase: int = inputs['''original_image''']
else:
_UpperCamelCase: Optional[Any] = None
_UpperCamelCase , _UpperCamelCase: Tuple = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
_UpperCamelCase: int = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase: List[Any] = image
if mask_image is not None:
_UpperCamelCase: List[str] = mask_image
if original_image is not None:
_UpperCamelCase: List[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
_UpperCamelCase: Dict = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
_UpperCamelCase: Any = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
_UpperCamelCase: int = self.get_dummy_inputs(_lowercase )
_UpperCamelCase: Optional[Any] = inputs['''generator''']
_UpperCamelCase: Tuple = inputs['''num_inference_steps''']
_UpperCamelCase: Tuple = inputs['''output_type''']
# inputs with prompt converted to embeddings
_UpperCamelCase: Dict = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase: List[Any] = image
if mask_image is not None:
_UpperCamelCase: Union[str, Any] = mask_image
if original_image is not None:
_UpperCamelCase: Optional[int] = original_image
_UpperCamelCase: List[Any] = pipe_loaded(**_lowercase )[0]
_UpperCamelCase: Optional[int] = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1E-4 )
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Dict = self.get_dummy_components()
_UpperCamelCase: Optional[Any] = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_UpperCamelCase: Dict = self.get_dummy_inputs(_lowercase )
_UpperCamelCase: int = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
_UpperCamelCase: Any = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_UpperCamelCase: List[Any] = self.get_dummy_inputs(_lowercase )
_UpperCamelCase: str = pipe_loaded(**_lowercase )[0]
_UpperCamelCase: int = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1E-4 ) | 271 | from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( lowercase: str = "" ) -> dict[str, float]:
'''simple docstring'''
_UpperCamelCase: Tuple = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_UpperCamelCase: Union[str, Any] = BeautifulSoup(requests.get(lowercase ).text , '''html.parser''' )
_UpperCamelCase: List[Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
_UpperCamelCase: str = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase , lowercase )
}
def lowerCAmelCase_ ( lowercase: str = "IMDb_Top_250_Movies.csv" ) -> None:
'''simple docstring'''
_UpperCamelCase: Any = get_imdb_top_aaa_movies()
with open(lowercase , '''w''' , newline='''''' ) as out_file:
_UpperCamelCase: Optional[Any] = csv.writer(lowercase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies() | 271 | 1 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
pass
def _UpperCamelCase ( __UpperCamelCase ) -> str:
lowerCamelCase_ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
lowerCamelCase_ = np.array(__UpperCamelCase )
lowerCamelCase_ = npimg.shape
return {"hash": hashimage(__UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
lowerCamelCase_ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
lowerCamelCase_ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = 'facebook/sam-vit-huge'
lowerCamelCase_ = pipeline('mask-generation' , model=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowerCamelCase_ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
] , )
| 384 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase = 1 ,__UpperCamelCase = 10_00 ) -> int:
lowerCamelCase_ = 1
lowerCamelCase_ = 0
for divide_by_number in range(__UpperCamelCase ,digit + 1 ):
lowerCamelCase_ = []
lowerCamelCase_ = numerator
for _ in range(1 ,digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCamelCase ):
lowerCamelCase_ = len(__UpperCamelCase )
lowerCamelCase_ = divide_by_number
else:
has_been_divided.append(__UpperCamelCase )
lowerCamelCase_ = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase :
__UpperCamelCase =BlenderbotSmallConfig
__UpperCamelCase ={}
__UpperCamelCase ="gelu"
def __init__( self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[int]=1_3 , snake_case__ : Optional[int]=7 , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=False , snake_case__ : int=9_9 , snake_case__ : Any=3_2 , snake_case__ : Tuple=2 , snake_case__ : Optional[int]=4 , snake_case__ : int=3_7 , snake_case__ : Optional[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Any=2_0 , snake_case__ : List[Any]=2 , snake_case__ : int=1 , snake_case__ : int=0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCamelCase ( self : Dict , snake_case__ : Tuple , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[Any]=None , ) -> str:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCamelCase =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase =(
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =[
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
__UpperCamelCase ="facebook/blenderbot_small-90M"
@cached_property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , return_tensors='tf' )
SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 439 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
a_ : str = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : str=None ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
while ask_again:
SCREAMING_SNAKE_CASE = input(_UpperCamelCase )
try:
if default is not None and len(_UpperCamelCase ) == 0:
return default
return convert_value(_UpperCamelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any]=[] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[Any]=0 ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BulletMenu(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = menu.run(default_choice=_UpperCamelCase )
return convert_value(_UpperCamelCase ) if convert_value is not None else result
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> str:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._format_usage(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = usage.replace('<command> [<args>] ' , '' )
return usage
| 439 | 1 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowerCamelCase_ = True
except ImportError:
lowerCamelCase_ = False
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case ( A__ ):
return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path )
class UpperCamelCase_ (__A ):
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : ArgumentParser ) -> Union[str, Any]:
UpperCAmelCase_ : int = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=lowerCAmelCase_ , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=lowerCAmelCase_ , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self : Union[str, Any] , lowerCAmelCase_ : bool , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None , *lowerCAmelCase_ : Dict ) -> List[Any]:
UpperCAmelCase_ : str = testing
UpperCAmelCase_ : str = testing_file
UpperCAmelCase_ : List[str] = path
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase_ : Dict = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowerCAmelCase_ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCAmelCase_ : Optional[Any] = (
Path(lowerCAmelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase_ : List[str] = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCAmelCase_ ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCAmelCase_ : Any = json.load(lowerCAmelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCAmelCase_ , extra_context=lowerCAmelCase_ , )
UpperCAmelCase_ : Dict = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCAmelCase_ : Tuple = json.load(lowerCAmelCase_ )
UpperCAmelCase_ : int = configuration["lowercase_modelname"]
UpperCAmelCase_ : Optional[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"""{directory}/configuration.json""" )
UpperCAmelCase_ : Optional[int] = "PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ : Dict = "TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ : Union[str, Any] = "Flax" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ : List[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowerCAmelCase_ )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(lowerCAmelCase_ : Optional[int] ):
with open(lowerCAmelCase_ , "r" ) as f:
UpperCAmelCase_ : Tuple = f.readlines()
with open(lowerCAmelCase_ , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCAmelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] ):
# Create temp file
UpperCAmelCase_ : Optional[int] = mkstemp()
UpperCAmelCase_ : str = False
with fdopen(lowerCAmelCase_ , "w" ) as new_file:
with open(lowerCAmelCase_ ) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase_ )
if line_to_copy_below in line:
UpperCAmelCase_ : List[str] = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase_ )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase_ , lowerCAmelCase_ )
# Remove original file
remove(lowerCAmelCase_ )
# Move new file
move(lowerCAmelCase_ , lowerCAmelCase_ )
def skip_units(lowerCAmelCase_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase_ : Dict ):
with open(lowerCAmelCase_ ) as datafile:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ : Optional[Any] = line.split("\"" )[1]
UpperCAmelCase_ : int = skip_units(lowerCAmelCase_ )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ : Dict = line.split("\"" )[1]
UpperCAmelCase_ : str = skip_units(lowerCAmelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ : List[Any] = []
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase_ )
remove(lowerCAmelCase_ )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(lowerCAmelCase_ )
| 717 |
"""simple docstring"""
class UpperCamelCase_ :
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=None ) -> int:
UpperCAmelCase_ : int = data
UpperCAmelCase_ : Optional[int] = previous
UpperCAmelCase_ : int = next_node
def __str__( self : Dict ) -> str:
return f"""{self.data}"""
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.data
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return self.next
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return self.previous
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : str ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = head
def __iter__( self : Any ) -> List[str]:
return self
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
if not self.current:
raise StopIteration
else:
UpperCAmelCase_ : Optional[int] = self.current.get_data()
UpperCAmelCase_ : Dict = self.current.get_next()
return value
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : Tuple = None # First node in list
UpperCAmelCase_ : Union[str, Any] = None # Last node in list
def __str__( self : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.head
UpperCAmelCase_ : int = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase_ : Optional[Any] = current.get_next()
return " ".join(str(lowerCAmelCase_ ) for node in nodes )
def __contains__( self : int , lowerCAmelCase_ : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase_ : Union[str, Any] = current.get_next()
return False
def __iter__( self : int ) -> Tuple:
return LinkedListIterator(self.head )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> None:
if self.head is None:
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : Union[str, Any] = node
else:
self.insert_before_node(self.head , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Node ) -> None:
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.insert_after_node(self.tail , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : Optional[Any] = Node(lowerCAmelCase_ )
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.set_tail(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> None:
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Tuple = node.previous
if node.get_previous() is None:
UpperCAmelCase_ : List[Any] = node_to_insert
else:
UpperCAmelCase_ : Dict = node_to_insert
UpperCAmelCase_ : Dict = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> None:
UpperCAmelCase_ : Dict = node
UpperCAmelCase_ : int = node.next
if node.get_next() is None:
UpperCAmelCase_ : int = node_to_insert
else:
UpperCAmelCase_ : Optional[Any] = node_to_insert
UpperCAmelCase_ : Any = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = Node(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCAmelCase_ , lowerCAmelCase_ )
return
current_position += 1
UpperCAmelCase_ : Optional[Any] = node.next
self.insert_after_node(self.tail , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Node:
UpperCAmelCase_ : List[Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase_ : Optional[int] = node.get_next()
raise Exception("Node not found" )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
if (node := self.get_node(lowerCAmelCase_ )) is not None:
if node == self.head:
UpperCAmelCase_ : Tuple = self.head.get_next()
if node == self.tail:
UpperCAmelCase_ : Optional[int] = self.tail.get_previous()
self.remove_node_pointers(lowerCAmelCase_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Node ) -> None:
if node.get_next():
UpperCAmelCase_ : int = node.previous
if node.get_previous():
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Dict = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return self.head is None
def snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 463 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : str = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def lowerCamelCase_( _lowerCamelCase ) -> dict[str, str]:
'''simple docstring'''
_lowerCamelCase : List[Any] = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_lowerCamelCase : Any = remove_duplicates(key.upper() )
_lowerCamelCase : Optional[Any] = len(_lowerCamelCase )
# First fill cipher with key characters
_lowerCamelCase : Any = {alphabet[i]: char for i, char in enumerate(_lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_lowerCamelCase ) , 26 ):
_lowerCamelCase : int = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_lowerCamelCase : Union[str, Any] = alphabet[i - offset]
_lowerCamelCase : List[str] = char
return cipher_alphabet
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(_lowerCamelCase , _lowerCamelCase ) for ch in message.upper() )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_lowerCamelCase , _lowerCamelCase ) for ch in message.upper() )
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = input("Enter message to encode or decode: " ).strip()
_lowerCamelCase : List[str] = input("Enter keyword: " ).strip()
_lowerCamelCase : Tuple = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
_lowerCamelCase : Any = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
_lowerCamelCase : Optional[Any] = create_cipher_map(_lowerCamelCase )
print(func(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 386 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any]=13 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: Optional[Any]=224 ,__lowerCAmelCase: Optional[int]=30 ,__lowerCAmelCase: Union[str, Any]=400 ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Union[str, Any]=[0.5, 0.5, 0.5] ,__lowerCAmelCase: Tuple=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = size if size is not None else {"height": 18, "width": 18}
_lowerCamelCase : Tuple = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Any = num_channels
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Optional[int] = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : int = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : Optional[int] = do_normalize
_lowerCamelCase : int = image_mean
_lowerCamelCase : Tuple = image_std
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = ViTImageProcessor if is_vision_available() else None
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_std" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_lowerCamelCase : Dict = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : str = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_lowerCamelCase : List[Any] = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Dict = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : int = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_lowerCamelCase : int = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Tuple = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,) | 386 | 1 |
'''simple docstring'''
from functools import reduce
__lowerCAmelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __lowerCamelCase ( lowerCAmelCase_ = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase_ , lowerCAmelCase_ : str(int(lowerCAmelCase_ ) * int(lowerCAmelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase_ ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 358 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="attention" ) -> int:
_a : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : int = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Dict = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[Any]:
if split_mlp_wi:
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : Optional[int] = (wi_a, wi_a)
else:
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCamelCase ( lowerCAmelCase_ , *, lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> Any:
_a : Dict = traverse_util.flatten_dict(variables['target'] )
_a : Tuple = {'/'.join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : int = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , lowerCAmelCase_ )
_a : str = collections.OrderedDict()
# Shared embeddings.
_a : List[str] = old['token_embedder/embedding']
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_attention_layer_norm' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Dict = o.T
_a : int = q.T
_a : List[str] = v.T
# Block i, layer 1 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_mlp_layer_norm' )
_a , _a : Any = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , lowerCAmelCase_ )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : Tuple = wi[0].T
_a : List[str] = wi[1].T
else:
_a : Union[str, Any] = wi.T
_a : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' ).T
_a : Optional[Any] = old['encoder/encoder_norm/scale']
if not scalable_attention:
_a : Union[str, Any] = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'encoder' ).T
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Dict = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_self_attention_layer_norm' )
_a , _a , _a , _a : Union[str, Any] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'self_attention' )
_a : str = layer_norm
_a : List[Any] = k.T
_a : Union[str, Any] = o.T
_a : int = q.T
_a : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
_a : List[Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' )
_a , _a , _a , _a : Optional[int] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'encoder_decoder_attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Union[str, Any] = o.T
_a : Any = q.T
_a : str = v.T
# Block i, layer 2 (MLP).
_a : str = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_mlp_layer_norm' )
_a , _a : int = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , lowerCAmelCase_ )
_a : List[str] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : Union[str, Any] = wi[1].T
else:
_a : Optional[Any] = wi.T
_a : List[str] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' ).T
_a : List[Any] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : List[Any] = old['decoder/logits_dense/kernel'].T
return new
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Tuple = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Union[str, Any] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
_a : Optional[Any] = state_dict['shared.weight']
return state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_a : Any = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
_a : str = convert_tax_to_pytorch(
lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ )
_a : Dict = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = False , ) -> str:
_a : int = MTaConfig.from_json_file(lowerCAmelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(lowerCAmelCase_ )
else:
_a : Optional[int] = UMTaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print('Done' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 358 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = str(_lowerCAmelCase )
return len(_lowerCAmelCase ) == 9 and set(_lowerCAmelCase ) == set("""123456789""" )
def lowercase ():
for base_num in range(9999 , 4999 , -1 ):
__lowerCAmelCase = 10_0002 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
__lowerCAmelCase = 100_2003 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 720 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=10 , snake_case_=18 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=None , ) -> int:
__lowerCAmelCase = size if size is not None else {"""shortest_edge""": 18}
__lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_frames
__lowerCAmelCase = image_size
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = crop_size
def A__ ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = VivitImageProcessor if is_vision_available() else None
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = VivitImageProcessingTester(self )
@property
def A__ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ ( self ) -> Optional[Any]:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for video in video_inputs:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for video in video_inputs:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for video in video_inputs:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 573 | 0 |
# Algorithm for the pigeonhole sorting
def a_ ( lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = min(lowerCAmelCase_ ) # min() finds the minimum value
__lowerCAmelCase = max(lowerCAmelCase_ ) # max() finds the maximum value
__lowerCAmelCase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__lowerCAmelCase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__lowerCAmelCase = 0
for count in range(lowerCAmelCase_ ):
while holes[count] > 0:
holes[count] -= 1
__lowerCAmelCase = count + min_val
i += 1
def a_ ( ):
__lowerCAmelCase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowerCAmelCase_ )
print('Sorted order is:', ' '.join(lowerCAmelCase_ ) )
if __name__ == "__main__":
main()
| 53 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : str = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 244 | 0 |
def lowerCamelCase_ ( A : int , A : int ):
"""simple docstring"""
lowerCAmelCase_ = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowerCAmelCase_ = n - k
# Calculate C(n,k)
for i in range(A ):
result *= n - i
result //= i + 1
return result
def lowerCamelCase_ ( A : int ):
"""simple docstring"""
return binomial_coefficient(2 * node_count , A ) // (node_count + 1)
def lowerCamelCase_ ( A : int ):
"""simple docstring"""
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
lowerCAmelCase_ = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowerCamelCase_ ( A : int ):
"""simple docstring"""
return catalan_number(A ) * factorial(A )
if __name__ == "__main__":
_snake_case = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 413 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = value
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = tree
def lowercase__ ( self , _UpperCAmelCase):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413 | 1 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : int | float | str ) -> tuple[int, int]:
"""simple docstring"""
try:
UpperCAmelCase_ : int = float(_SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError("Please enter a valid number" )
UpperCAmelCase_ : List[str] = decimal - int(_SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(_SCREAMING_SNAKE_CASE ), 1
else:
UpperCAmelCase_ : Any = len(str(_SCREAMING_SNAKE_CASE ).split("." )[1] )
UpperCAmelCase_ : Any = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase_ : Any = 10**number_of_frac_digits
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = denominator, numerator
while True:
UpperCAmelCase_ : List[str] = dividend % divisor
if remainder == 0:
break
UpperCAmelCase_ , UpperCAmelCase_ : int = divisor, remainder
UpperCAmelCase_ , UpperCAmelCase_ : Dict = numerator / divisor, denominator / divisor
return int(_SCREAMING_SNAKE_CASE ), int(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 71 |
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
| 71 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def lowerCamelCase ( ) -> Tuple:
'''simple docstring'''
from torch.utils.cpp_extension import load
__UpperCAmelCase : int = Path(_UpperCamelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
__UpperCAmelCase : Union[str, Any] = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _UpperCamelCase , with_cuda=_UpperCamelCase , extra_include_paths=[str(_UpperCamelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 703 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict=3 , UpperCamelCase : Dict=32 , UpperCamelCase : int=3 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Any=[8, 16, 32, 64] , UpperCamelCase : Optional[int]=[1, 1, 2, 1] , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Optional[Any]="relu" , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=["stage2", "stage3", "stage4"] , UpperCamelCase : Optional[int]=[2, 3, 4] , UpperCamelCase : Any=1 , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : int = image_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : int = embeddings_size
__UpperCAmelCase : Dict = hidden_sizes
__UpperCAmelCase : List[Any] = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Dict = len(UpperCamelCase )
__UpperCAmelCase : Tuple = out_features
__UpperCAmelCase : str = out_indices
__UpperCAmelCase : Optional[int] = num_groups
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = BitModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : str = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Tuple = BitForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Optional[Any] = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , A , unittest.TestCase ):
"""simple docstring"""
__a = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__a = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Any = BitModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase )
__UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(config=UpperCamelCase )
for name, module in model.named_modules():
if isinstance(UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : str ):
__UpperCAmelCase : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
__UpperCAmelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : List[Any] = layer_type
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[Any] = BitModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase ( ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Dict = model(**UpperCamelCase )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@require_torch
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = (BitBackbone,) if is_torch_available() else ()
__a = BitConfig
__a = False
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = BitModelTester(self )
| 299 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def UpperCAmelCase ( a__ = 1_00_00_00 , a__ = 10 ):
'''simple docstring'''
lowerCAmelCase :defaultdict = defaultdict(a__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase :Union[str, Any] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
lowerCAmelCase :Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""") | 553 |
"""simple docstring"""
import math
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(a__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__SCREAMING_SNAKE_CASE = 'Enter the base and the power separated by a comma: '
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = map(int, input(prompt).split(','))
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
__SCREAMING_SNAKE_CASE = res(xa, ya)
__SCREAMING_SNAKE_CASE = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal') | 553 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__UpperCamelCase : Optional[int] = {'UserAgent': UserAgent().random}
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : List[str] = script.contents[0]
SCREAMING_SNAKE_CASE : Any = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase__ :
def __init__( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = f"""https://www.instagram.com/{username}/"""
SCREAMING_SNAKE_CASE : Any = self.get_json()
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
SCREAMING_SNAKE_CASE : Any = BeautifulSoup(_SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Optional[int] ):
'''simple docstring'''
return f"""{self.__class__.__name__}(\'{self.username}\')"""
def __str__( self : Any ):
'''simple docstring'''
return f"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.user_data["username"]
@property
def __A ( self : Dict ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __A ( self : Any ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __A ( self : int ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __A ( self : Any ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __A ( self : Any ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __A ( self : str ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __A ( self : int ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.user_data["is_private"]
def A ( _lowercase = "github" ):
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE : int = InstagramUser(_lowercase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowercase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : List[Any] = InstagramUser('github')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 721 | from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_lowerCAmelCase = quote(SCREAMING_SNAKE_CASE_ )
return hfh.hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" , revision=SCREAMING_SNAKE_CASE_ )
| 18 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 479 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class _A ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : int = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
SCREAMING_SNAKE_CASE : Optional[int] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : str = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE : Optional[int] = frozenset([])
SCREAMING_SNAKE_CASE : Optional[Any] = True
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Dict = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
def UpperCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=_SCREAMING_SNAKE_CASE , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=_SCREAMING_SNAKE_CASE , only_cross_attention=_SCREAMING_SNAKE_CASE , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
SCREAMING_SNAKE_CASE_ : Optional[int] = EulerDiscreteScheduler(prediction_type='sample' )
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Dict = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 'cpu'
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_SCREAMING_SNAKE_CASE ).images
SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = 2
SCREAMING_SNAKE_CASE_ : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE_ : int = getattr(_SCREAMING_SNAKE_CASE , scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Any = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : List[str] = pipe(**_SCREAMING_SNAKE_CASE )[0]
outputs.append(_SCREAMING_SNAKE_CASE )
assert check_same_shape(_SCREAMING_SNAKE_CASE )
@require_torch_gpu
@slow
class _A ( unittest.TestCase):
def UpperCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE_ : Dict = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='latent' ).images
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , num_inference_steps=20 , guidance_scale=0 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , ).images[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE_ : Dict = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
SCREAMING_SNAKE_CASE_ : Optional[int] = upscaler(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , num_inference_steps=20 , guidance_scale=0 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , ).images[0]
SCREAMING_SNAKE_CASE_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 718 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( __magic_name__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : Dict = GPTSanJapaneseTokenizer
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''do_clean_text''': False, '''add_prefix_space''': False}
def UpperCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
SCREAMING_SNAKE_CASE_ : int = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_input_output_texts(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.decode(_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
return text, ids
def UpperCAmelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。'
SCREAMING_SNAKE_CASE_ : List[str] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE_ : Optional[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
SCREAMING_SNAKE_CASE_ : str = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
SCREAMING_SNAKE_CASE_ : str = 'こんにちは、、、、世界。こんばんは、、、、世界。'
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE_ : int = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE_ : List[str] = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE_ : List[str] = 'こんにちは、世界。こんばんは、世界。😀'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(prefix_text + input_text )
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode('' , prefix_text=prefix_text + input_text )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , prefix_text=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.decode(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE_ : Any = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE_ : int = len(tokenizer.encode(_SCREAMING_SNAKE_CASE ) ) - 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(tokenizer.encode(_SCREAMING_SNAKE_CASE ) ) - 2
SCREAMING_SNAKE_CASE_ : str = [1] + [0] * (len_prefix + len_text + 1)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
SCREAMING_SNAKE_CASE_ : Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
SCREAMING_SNAKE_CASE_ : Any = tokenizer(prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(_SCREAMING_SNAKE_CASE , prefix_text=_SCREAMING_SNAKE_CASE ).token_type_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('あンいワ' )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(_SCREAMING_SNAKE_CASE ) , tokenizer.decode(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(tokenizer.decode(_SCREAMING_SNAKE_CASE ) , tokenizer.decode(_SCREAMING_SNAKE_CASE ) )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE_ : Tuple = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.batch_encode_plus(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
# fmt: off
SCREAMING_SNAKE_CASE_ : List[str] = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
SCREAMING_SNAKE_CASE_ : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
SCREAMING_SNAKE_CASE_ : Any = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.token_type_ids , _SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.attention_mask , _SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.input_ids , _SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.token_type_ids , _SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.attention_mask , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
| 353 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : int = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 295 |
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 200 ) -> int:
lowerCamelCase__ : Dict = [1, 2, 5, 10, 20, 50, 100, 200]
lowerCamelCase__ : Union[str, Any] = [0] * (pence + 1)
lowerCamelCase__ : List[str] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 295 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "geglu" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "layer_norm" , _SCREAMING_SNAKE_CASE = False , ):
super().__init__()
_UpperCAmelCase = only_cross_attention
_UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
_UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_UpperCAmelCase = AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
_UpperCAmelCase = AdaLayerNormZero(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = Attention(
query_dim=_SCREAMING_SNAKE_CASE , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_SCREAMING_SNAKE_CASE , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_UpperCAmelCase = (
AdaLayerNorm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm
else nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = Attention(
query_dim=_SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_SCREAMING_SNAKE_CASE , dim_head=_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , upcast_attention=_SCREAMING_SNAKE_CASE , ) # is self-attn if encoder_hidden_states is none
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
# 3. Feed-forward
_UpperCAmelCase = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = FeedForward(_SCREAMING_SNAKE_CASE , dropout=_SCREAMING_SNAKE_CASE , activation_fn=_SCREAMING_SNAKE_CASE , final_dropout=_SCREAMING_SNAKE_CASE )
# let chunk size default to None
_UpperCAmelCase = None
_UpperCAmelCase = 0
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Sets chunk feed-forward
_UpperCAmelCase = chunk_size
_UpperCAmelCase = dim
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_UpperCAmelCase = self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.norma(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=hidden_states.dtype )
else:
_UpperCAmelCase = self.norma(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_UpperCAmelCase = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
_UpperCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_UpperCAmelCase = (
self.norma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = self.attna(
_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = attn_output + hidden_states
# 3. Feed-forward
_UpperCAmelCase = self.norma(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
_UpperCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_UpperCAmelCase = torch.cat(
[self.ff(_SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(_SCREAMING_SNAKE_CASE , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_UpperCAmelCase = self.ff(_SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
_UpperCAmelCase = ff_output + hidden_states
return hidden_states
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = "geglu" , _SCREAMING_SNAKE_CASE = False , ):
super().__init__()
_UpperCAmelCase = int(dim * mult )
_UpperCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_UpperCAmelCase = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if activation_fn == "gelu-approximate":
_UpperCAmelCase = GELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , approximate="""tanh""" )
elif activation_fn == "geglu":
_UpperCAmelCase = GEGLU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif activation_fn == "geglu-approximate":
_UpperCAmelCase = ApproximateGELU(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.ModuleList([] )
# project in
self.net.append(_SCREAMING_SNAKE_CASE )
# project dropout
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
# project out
self.net.append(nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
for module in self.net:
_UpperCAmelCase = module(_SCREAMING_SNAKE_CASE )
return hidden_states
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "none" ):
super().__init__()
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = approximate
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.proj(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.gelu(_SCREAMING_SNAKE_CASE )
return hidden_states
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
super().__init__()
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , dim_out * 2 )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
if gate.device.type != "mps":
return F.gelu(_SCREAMING_SNAKE_CASE )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase , _UpperCAmelCase = self.proj(_SCREAMING_SNAKE_CASE ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_SCREAMING_SNAKE_CASE )
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
super().__init__()
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.proj(_SCREAMING_SNAKE_CASE )
return x * torch.sigmoid(1.702 * x )
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
super().__init__()
_UpperCAmelCase = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , embedding_dim * 2 )
_UpperCAmelCase = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase , _UpperCAmelCase = torch.chunk(_SCREAMING_SNAKE_CASE , 2 )
_UpperCAmelCase = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale) + shift
return x
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
super().__init__()
_UpperCAmelCase = CombinedTimestepLabelEmbeddings(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , 6 * embedding_dim , bias=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.LayerNorm(_SCREAMING_SNAKE_CASE , elementwise_affine=_SCREAMING_SNAKE_CASE , eps=1e-6 )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
_UpperCAmelCase = self.linear(self.silu(self.emb(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hidden_dtype=_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = emb.chunk(6 , dim=1 )
_UpperCAmelCase = self.norm(_SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _A ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1e-5 ):
super().__init__()
_UpperCAmelCase = num_groups
_UpperCAmelCase = eps
if act_fn is None:
_UpperCAmelCase = None
else:
_UpperCAmelCase = get_activation(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , out_dim * 2 )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if self.act:
_UpperCAmelCase = self.act(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.linear(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = emb[:, :, None, None]
_UpperCAmelCase , _UpperCAmelCase = emb.chunk(2 , dim=1 )
_UpperCAmelCase = F.group_norm(_SCREAMING_SNAKE_CASE , self.num_groups , eps=self.eps )
_UpperCAmelCase = x * (1 + scale) + shift
return x | 721 |
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> bool:
_UpperCAmelCase = len(snake_case ) + 1
_UpperCAmelCase = len(snake_case ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_UpperCAmelCase = [[0 for i in range(snake_case )] for j in range(snake_case )]
# since string of zero length match pattern of zero length
_UpperCAmelCase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , snake_case ):
_UpperCAmelCase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , snake_case ):
_UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , snake_case ):
for j in range(1 , snake_case ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_UpperCAmelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_UpperCAmelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_UpperCAmelCase = dp[i - 1][j]
else:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a = "aab"
a = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}') | 175 | 0 |
'''simple docstring'''
import random
class _UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
a_ : int = [ord(snake_case__ ) for i in text]
a_ : Optional[int] = []
a_ : int = []
for i in plain:
a_ : List[Any] = random.randint(1 , 3_00 )
a_ : List[Any] = (i + k) * k
cipher.append(snake_case__ )
key.append(snake_case__ )
return cipher, key
@staticmethod
def _lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : int = []
for i in range(len(snake_case__ ) ):
a_ : List[str] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case__ ) )
return "".join(snake_case__ )
if __name__ == "__main__":
__snake_case ,__snake_case: List[str] = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 577 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : Any = VideoMAEConfig()
set_architecture_configs(__lowerCamelCase , __lowerCamelCase )
if "finetuned" not in model_name:
snake_case : int = False
if "finetuned" in model_name:
snake_case : Dict = "huggingface/label-files"
if "kinetics" in model_name:
snake_case : List[str] = 400
snake_case : List[str] = "kinetics400-id2label.json"
elif "ssv2" in model_name:
snake_case : List[str] = 174
snake_case : Optional[int] = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
snake_case : Tuple = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) )
snake_case : Dict = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
snake_case : Optional[Any] = idalabel
snake_case : Tuple = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ):
if "small" in model_name:
snake_case : Dict = 384
snake_case : Optional[Any] = 1536
snake_case : Union[str, Any] = 12
snake_case : Dict = 16
snake_case : List[str] = 12
snake_case : List[str] = 3
snake_case : str = 192
snake_case : Union[str, Any] = 768
elif "large" in model_name:
snake_case : Tuple = 1024
snake_case : List[Any] = 4096
snake_case : Any = 24
snake_case : Optional[int] = 16
snake_case : List[str] = 12
snake_case : int = 8
snake_case : Any = 512
snake_case : List[Any] = 2048
elif "huge" in model_name:
snake_case : Any = 1280
snake_case : List[str] = 5120
snake_case : Dict = 32
snake_case : int = 16
snake_case : int = 12
snake_case : Tuple = 8
snake_case : str = 640
snake_case : List[Any] = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def UpperCamelCase ( __lowerCamelCase : Any ):
if "encoder." in name:
snake_case : Dict = name.replace("encoder." , "" )
if "cls_token" in name:
snake_case : str = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
snake_case : Tuple = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
snake_case : str = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
snake_case : int = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case : List[Any] = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
snake_case : Tuple = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
snake_case : Any = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
snake_case : int = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
snake_case : Optional[Any] = name.replace("attn" , "attention.self" )
if "attn" in name:
snake_case : List[str] = name.replace("attn" , "attention.attention" )
if "norm1" in name:
snake_case : Optional[int] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case : Any = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
snake_case : Optional[int] = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
snake_case : Tuple = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
snake_case : str = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case : List[Any] = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case : Dict = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
snake_case : Union[str, Any] = name.replace("head" , "classifier" )
return name
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
for key in orig_state_dict.copy().keys():
snake_case : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if key.startswith("encoder." ):
snake_case : Optional[Any] = key.replace("encoder." , "" )
if "qkv" in key:
snake_case : Tuple = key.split("." )
if key.startswith("decoder.blocks" ):
snake_case : str = config.decoder_hidden_size
snake_case : Optional[Any] = int(key_split[2] )
snake_case : Tuple = "decoder.decoder_layers."
if "weight" in key:
snake_case : Optional[Any] = val[:dim, :]
snake_case : List[str] = val[dim : dim * 2, :]
snake_case : str = val[-dim:, :]
else:
snake_case : str = config.hidden_size
snake_case : str = int(key_split[1] )
snake_case : Union[str, Any] = "videomae.encoder.layer."
if "weight" in key:
snake_case : int = val[:dim, :]
snake_case : int = val[dim : dim * 2, :]
snake_case : Union[str, Any] = val[-dim:, :]
else:
snake_case : Optional[Any] = val
return orig_state_dict
def UpperCamelCase ( ):
snake_case : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
snake_case : Optional[Any] = np.load(__lowerCamelCase )
return list(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
snake_case : List[str] = get_videomae_config(__lowerCamelCase )
if "finetuned" in model_name:
snake_case : List[Any] = VideoMAEForVideoClassification(__lowerCamelCase )
else:
snake_case : List[str] = VideoMAEForPreTraining(__lowerCamelCase )
# download original checkpoint, hosted on Google Drive
snake_case : List[Any] = "pytorch_model.bin"
gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
snake_case : str = torch.load(__lowerCamelCase , map_location="cpu" )
if "model" in files:
snake_case : List[str] = files["model"]
else:
snake_case : List[str] = files["module"]
snake_case : List[str] = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# verify model on basic input
snake_case : Tuple = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case : Dict = prepare_video()
snake_case : Dict = image_processor(__lowerCamelCase , return_tensors="pt" )
if "finetuned" not in model_name:
snake_case : List[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
snake_case : Union[str, Any] = torch.load(__lowerCamelCase )
snake_case : Any = model(**__lowerCamelCase )
snake_case : List[Any] = outputs.logits
snake_case : Any = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case : List[str] = torch.Size([1, 400] )
snake_case : List[str] = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case : Dict = torch.Size([1, 174] )
snake_case : Tuple = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case : Union[str, Any] = torch.Size([1, 1408, 1536] )
snake_case : Union[str, Any] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case : str = torch.Size([1, 1408, 1536] )
snake_case : List[Any] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case : int = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case : Optional[int] = torch.Size([1, 1408, 1536] )
snake_case : Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case : Dict = torch.Size([1, 400] )
snake_case : Any = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case : str = torch.Size([1, 400] )
snake_case : List[str] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case : int = torch.Size([1, 400] )
snake_case : Tuple = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case : Optional[Any] = torch.Size([1, 400] )
snake_case : Dict = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case : List[Any] = torch.Size([1, 1408, 1536] )
snake_case : Union[str, Any] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case : Any = torch.Size([1, 174] )
snake_case : str = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case : Dict = torch.Size([1, 1408, 1536] )
snake_case : List[Any] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case : List[Any] = torch.Size([1, 174] )
snake_case : Optional[int] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case : List[Any] = outputs.loss
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__lowerCamelCase , organization="nielsr" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCamelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 204 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _snake_case ( unittest.TestCase):
def __init__( self : Optional[int], __lowercase : int, __lowercase : List[str]=7, __lowercase : List[Any]=3, __lowercase : int=30, __lowercase : str=400, __lowercase : Any=True, __lowercase : int=None, __lowercase : Dict=True, __lowercase : List[Any]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], __lowercase : int=True, __lowercase : Optional[Any]=1 / 255, __lowercase : Optional[Any]=True, ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_pad
def A__ ( self : List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self : List[str], __lowercase : int, __lowercase : List[Any]=False ):
if not batched:
lowercase__ = image_inputs[0]
if isinstance(__lowercase, Image.Image ):
lowercase__ , lowercase__ = image.size
else:
lowercase__ , lowercase__ = image.shape[1], image.shape[2]
if w < h:
lowercase__ = int(self.size["shortest_edge"] * h / w )
lowercase__ = self.size["shortest_edge"]
elif w > h:
lowercase__ = self.size["shortest_edge"]
lowercase__ = int(self.size["shortest_edge"] * w / h )
else:
lowercase__ = self.size["shortest_edge"]
lowercase__ = self.size["shortest_edge"]
else:
lowercase__ = []
for image in image_inputs:
lowercase__ , lowercase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase__ = max(__lowercase, key=lambda __lowercase : item[0] )[0]
lowercase__ = max(__lowercase, key=lambda __lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
UpperCamelCase__ : Any =ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self : List[str] ):
lowercase__ = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : Optional[Any] ):
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase, "image_mean" ) )
self.assertTrue(hasattr(__lowercase, "image_std" ) )
self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
self.assertTrue(hasattr(__lowercase, "do_resize" ) )
self.assertTrue(hasattr(__lowercase, "size" ) )
def A__ ( self : Any ):
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad, __lowercase )
lowercase__ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=__lowercase )
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad, __lowercase )
def A__ ( self : List[str] ):
pass
def A__ ( self : List[Any] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def A__ ( self : List[str] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def A__ ( self : List[Any] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def A__ ( self : int ):
# prepare image and target
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r" ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {"image_id": 3_9769, "annotations": target}
# encode them
lowercase__ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
lowercase__ = image_processing(images=__lowercase, annotations=__lowercase, return_tensors="pt" )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, __lowercase )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __lowercase, atol=1e-4 ) )
# verify area
lowercase__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __lowercase ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, __lowercase )
lowercase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __lowercase, atol=1e-3 ) )
# verify image_id
lowercase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __lowercase ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __lowercase ) )
# verify class_labels
lowercase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __lowercase ) )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __lowercase ) )
# verify size
lowercase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __lowercase ) )
@slow
def A__ ( self : Optional[int] ):
# prepare image, target and masks_path
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r" ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowercase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowercase__ = ConditionalDetrImageProcessor(format="coco_panoptic" )
lowercase__ = image_processing(images=__lowercase, annotations=__lowercase, masks_path=__lowercase, return_tensors="pt" )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, __lowercase )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __lowercase, atol=1e-4 ) )
# verify area
lowercase__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __lowercase ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, __lowercase )
lowercase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __lowercase, atol=1e-3 ) )
# verify image_id
lowercase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __lowercase ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __lowercase ) )
# verify class_labels
lowercase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __lowercase ) )
# verify masks
lowercase__ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), __lowercase )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __lowercase ) )
# verify size
lowercase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __lowercase ) )
| 37 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 37 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase_ = {
"""squeezebert/squeezebert-uncased""": 5_1_2,
"""squeezebert/squeezebert-mnli""": 5_1_2,
"""squeezebert/squeezebert-mnli-headless""": 5_1_2,
}
lowerCamelCase_ = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class _SCREAMING_SNAKE_CASE( A__ ):
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Any = SqueezeBertTokenizer
def __init__( self ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__="[UNK]" ,SCREAMING_SNAKE_CASE__="[SEP]" ,SCREAMING_SNAKE_CASE__="[PAD]" ,SCREAMING_SNAKE_CASE__="[CLS]" ,SCREAMING_SNAKE_CASE__="[MASK]" ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[int]:
"""simple docstring"""
super().__init__(
lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,tokenize_chinese_chars=lowerCamelCase__ ,strip_accents=lowerCamelCase__ ,**lowerCamelCase__ ,)
__SCREAMING_SNAKE_CASE :List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' ,lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' ,lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,lowerCamelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE :Union[str, Any] = getattr(lowerCamelCase__ ,normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE :List[Any] = do_lower_case
__SCREAMING_SNAKE_CASE :Optional[Any] = strip_accents
__SCREAMING_SNAKE_CASE :str = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE :Any = normalizer_class(**lowerCamelCase__ )
__SCREAMING_SNAKE_CASE :Any = do_lower_case
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ )
return tuple(lowerCamelCase__ ) | 498 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def UpperCamelCase_ ( __a ) -> Union[str, Any]:
a__ : Tuple = R"\w+[.]\d+"
a__ : List[Any] = re.findall(__a , __a )
for pat in pats:
a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) )
return key
def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
a__ : List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
a__ : Any = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
a__ : List[str] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
a__ : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
a__ : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_ ( __a , __a , __a=42 ) -> str:
# Step 1: Convert pytorch tensor to numpy
a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) )
a__ : Optional[Any] = flatten_dict(__a )
a__ : Union[str, Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a__ : Optional[int] = rename_key(__a )
a__ : Optional[int] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
a__ : str = jnp.asarray(__a )
return unflatten_dict(__a )
| 37 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = ViTImageProcessor if is_vision_available() else None
@property
def __A ( self : Dict ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = (3, 32, 128)
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE_ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
SCREAMING_SNAKE_CASE_ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
SCREAMING_SNAKE_CASE_ = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def __A ( self : int , **__magic_name__ : List[str] ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : List[str] , **__magic_name__ : Optional[Any] ) -> Dict:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : str ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
SCREAMING_SNAKE_CASE_ = Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) )
return image_input
def __A ( self : Any ) -> str:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "test"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "test"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.char_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __A ( self : str ) -> Any:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.randn(1 , 27 , 38 )
SCREAMING_SNAKE_CASE_ = torch.randn(1 , 27 , 50_257 )
SCREAMING_SNAKE_CASE_ = torch.randn(1 , 27 , 30_522 )
SCREAMING_SNAKE_CASE_ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 709 | import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A : Any = logging.getLogger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=-1 ) -> Optional[Any]:
# in NER datasets, the last column is usually reserved for NER label
SCREAMING_SNAKE_CASE_ = label_idx
def __A ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = mode.value
SCREAMING_SNAKE_CASE_ = os.path.join(__magic_name__ , F'''{mode}.txt''' )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = []
with open(__magic_name__ , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__magic_name__ , labels=__magic_name__ ) )
guid_index += 1
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
else:
SCREAMING_SNAKE_CASE_ = line.split(" " )
words.append(splits[0] )
if len(__magic_name__ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__magic_name__ , labels=__magic_name__ ) )
return examples
def __A ( self : Tuple , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(__magic_name__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
SCREAMING_SNAKE_CASE_ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(__magic_name__ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __A ( self : Optional[int] , __magic_name__ : str ) -> List[str]:
if path:
with open(__magic_name__ , "r" ) as f:
SCREAMING_SNAKE_CASE_ = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> str:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self : Any , __magic_name__ : str ) -> List[str]:
if path:
with open(__magic_name__ , "r" ) as f:
SCREAMING_SNAKE_CASE_ = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = mode.value
SCREAMING_SNAKE_CASE_ = os.path.join(__magic_name__ , F'''{mode}.txt''' )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = []
with open(__magic_name__ , encoding="utf-8" ) as f:
for sentence in parse_incr(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(__magic_name__ ) == len(__magic_name__ )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=__magic_name__ , labels=__magic_name__ ) )
guid_index += 1
return examples
def __A ( self : Optional[int] , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = 0
for sentence in parse_incr(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = preds_list[example_id]
SCREAMING_SNAKE_CASE_ = ""
for token in sentence:
out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(__magic_name__ )
example_id += 1
def __A ( self : Optional[int] , __magic_name__ : str ) -> List[str]:
if path:
with open(__magic_name__ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 356 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __lowercase :
_a = BlenderbotConfig
_a = {}
_a = """gelu"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=20 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=0 , ) -> List[str]:
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a = tf.concat([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_blenderbot_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, inputs_dict
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
__a = TFBlenderbotModel(config=UpperCamelCase ).get_decoder()
__a = inputs_dict['input_ids']
__a = input_ids[:1, :]
__a = inputs_dict['attention_mask'][:1, :]
__a = inputs_dict['head_mask']
__a = 1
# first forward pass
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__a = tf.concat([input_ids, next_tokens] , axis=-1 )
__a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
__a = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a = output_from_no_past[:, -3:, random_slice_idx]
__a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( a_ : List[Any] , a_ : Dict , a_ : str , a_ : List[str]=None , a_ : List[Any]=None , a_ : str=None , a_ : Tuple=None , a_ : List[Any]=None , ):
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowercase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
_a = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_a = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_a = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_a = True
_a = False
_a = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a = TFBlenderbotModelTester(self )
__a = ConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> int:
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase )
@require_tokenizers
@require_tf
class __lowercase ( unittest.TestCase ):
_a = ["""My friends are cool but they eat too many carbs."""]
_a = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCamelCase__ ( self ) -> Any:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ) -> Tuple:
__a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ) -> int:
__a = self.tokenizer(self.src_text , return_tensors='tf' )
__a = self.model.generate(
model_inputs.input_ids , )
__a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 539 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def UpperCamelCase__ ( self ) -> List[str]:
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
__a = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=0 , UpperCamelCase=True ) -> Dict:
if str(UpperCamelCase ).startswith('mps' ):
__a = torch.manual_seed(UpperCamelCase )
else:
__a = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(UpperCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self ) -> int:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**UpperCamelCase )
__a = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = self.get_dummy_inputs(UpperCamelCase )
inputs.update({'image_embeds': None} )
__a = sd_pipe(**UpperCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase__ ( self ) -> Any:
__a = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> str:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCamelCase , 'anime turle' , generator=UpperCamelCase , output_type='np' )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCamelCase , 'anime turle' , generator=UpperCamelCase , output_type='np' )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
UpperCamelCase , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 539 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : Any =XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__snake_case : int =torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__snake_case : int =torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : List[Any] =torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : Any =model(a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
@slow
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : Union[str, Any] =XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
__snake_case : Union[str, Any] =torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__snake_case : Optional[int] =torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : str =torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : Dict =model(a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
| 716 |
"""simple docstring"""
def __lowercase ( a : str , a : str ) -> str:
__snake_case : int =len(a )
__snake_case : int =len(a )
__snake_case : int =(
first_str_length if first_str_length > second_str_length else second_str_length
)
__snake_case : list =[]
for char_count in range(a ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(a )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 497 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.