code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """
    Return True if vertex with adjacency row ``neighbours`` may take ``color``:
    no adjacent vertex (neighbour == 1) already carries that color.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """
    Backtracking helper: try to color vertices ``index`` .. len(graph)-1.

    :param graph: adjacency matrix (graph[i][j] == 1 iff i and j are adjacent)
    :param max_colors: number of colors available (colors are 0 .. max_colors-1)
    :param colored_vertices: current partial assignment, -1 for uncolored; mutated in place
    :param index: next vertex to color
    :return: True if a full valid coloring was reached
    """
    # Base Case: every vertex has been colored
    if index == len(graph):
        return True
    # Recursive Step: try each color on the current vertex
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """
    Attempt to color ``graph`` with at most ``max_colors`` colors.

    :return: a list assigning a color (0-based) to each vertex, or [] when no
        valid coloring with ``max_colors`` colors exists.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 2 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in ``colors`` (0, 1, 2) with
    Dijkstra's Dutch National Flag algorithm: single pass, in place,
    O(n) time and O(1) extra space.

    :param sequence: list of integers drawn from ``colors``
    :return: the same list, sorted ascending
    :raises ValueError: if an element outside ``colors`` is encountered
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 belongs at the front: swap into the "low" region
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            # 1 is already in the middle region
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 belongs at the back: swap into the "high" region;
            # do not advance mid, the swapped-in element is unexamined
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of 0/1/2 values from the user and sort it.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 2 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; used by convert_glpn_checkpoint below.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """
    Map original GLPN checkpoint key names onto the HuggingFace GLPN layout.

    :param state_dict: mapping of original parameter names to tensors
    :return: OrderedDict with the same values under the renamed keys
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """
    Split each fused key/value ("kv") projection of the original checkpoint
    into separate key and value weights/biases, in place.

    :param state_dict: renamed state dict; mutated in place
    :param config: GLPN config providing num_encoder_blocks, depths, hidden_sizes
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict:
            # the first hidden_sizes[i] rows are the key projection, the rest the value projection
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download and return the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response's raw file object
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """
    Copy/paste/tweak the original GLPN checkpoint weights into the HuggingFace
    GLPN structure, verify the forward pass, and optionally push to the hub.
    """
    # GLPN uses a SegFormer-B4-sized backbone
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output against known-good slices for the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,  # NOTE(review): boolean was obfuscated in source — True matches upstream usage
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 2 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Determine whether ``number`` is prime using 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    # islice(gen, nth-1, nth) skips the first nth-1 primes and stops after the nth
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the roberta model: maps submodule name -> public objects.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy module that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
if _re_test_backend.search(_snake_case ) is None:
return None
_A = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def parse_init(init_file):
    """
    Read an __init__.py file and parse (per backend) the two halves:
    the `_import_structure` definitions and the TYPE_CHECKING imports.
    Returns (import_dict_objects, type_hint_objects), each a dict mapping a
    backend name ("none" for no backend) to the list of declared objects,
    or None for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Compare the two halves of an init (import structure vs TYPE_CHECKING) and
    return a list of human-readable error strings (empty when consistent).
    """

    def find_duplicates(seq):
        # Objects declared more than once in the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Walk the transformers source tree, parse every lazy __init__.py and raise
    a ValueError describing every file whose two halves disagree.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of submodules of the transformers package (dotted names)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules; deeper ones are covered by their folder.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main init.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Raise a ValueError listing every submodule missing from the main init's
    `_import_structure` keys (except those explicitly ignored)."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 2 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used to build the test tokenizers below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for the standard Pegasus SentencePiece tokenizer."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        # NOTE(review): obfuscated keyword values restored to upstream usage (None/False) — confirm
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for the BigBird-Pegasus variant (offset=0, [MASK] token)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): obfuscated keyword value restored to upstream usage (mask_token_sent=None) — confirm
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Check the HF tokenizer against ids produced by the original TF implementation."""
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 2 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline):
    """Image-to-text pipeline: predicts a caption for an image, optionally
    conditioned on a text prompt for model types that support it.

    NOTE(review): reconstructed from a name-mangled original in which every
    hook method collided on one name and several signatures had duplicate
    parameter names (a SyntaxError). Base class and decorator argument are
    restored from this file's own imports (`Pipeline`, `PIPELINE_INIT_ARGS`).
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        # Only vision-to-sequence model classes are valid for this pipeline.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        """Route call-time kwargs to the preprocess/forward/postprocess steps."""
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params['''prompt'''] = prompt
        if generate_kwargs is not None:
            forward_kwargs['''generate_kwargs'''] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['''generate_kwargs'''] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            forward_kwargs['''generate_kwargs''']['''max_new_tokens'''] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """Caption the given image(s): a single image/url/path or a list thereof."""
        return super().__call__(images , **kwargs )

    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                # GIT expects a leading CLS token in front of the prompt ids.
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['''input_ids'''] = None
        return model_inputs

    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , list )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            model_inputs['''input_ids'''] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs

    def postprocess( self , model_outputs ):
        """Decode each generated sequence into a {"generated_text": ...} record."""
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 2 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL.
# Fix: the "openqa" entry pointed at "/aresolve/" instead of "/resolve/".
UpperCAmelCase_ = {
    """google/realm-cc-news-pretrained-embedder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-encoder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-scorer""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-openqa""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"""
    ),
    """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
    """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
    """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
    """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase__ ( _A):
    """Configuration for REALM models (embedder/encoder/scorer/reader/open-QA).

    NOTE(review): the mangled original declared every `__init__` parameter with
    the same name (a SyntaxError); parameter names are reconstructed from the
    body's attribute assignments, defaults kept exactly as found.
    """

    # Model identifier consumed by the auto-class machinery.
    model_type = "realm"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        retriever_proj_size=1_28,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=30_72,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        span_hidden_size=2_56,
        max_span_width=10,
        reader_layer_norm_eps=1E-3,
        reader_beam_size=5,
        reader_seq_len=3_20,
        num_block_records=13_35_37_18,
        searcher_beam_size=50_00,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 2 |
import requests
from bs4 import BeautifulSoup  # NOTE(review): was `bsa`, a non-existent module


def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
    """Scrape the current price of ticker `_snake_case` from Yahoo Finance India.

    Fixes the mangled original, whose body read an undefined `symbol` name.
    NOTE(review): depends on Yahoo's CSS class names, which change often.
    """
    url = f'''https://in.finance.yahoo.com/quote/{_snake_case}?s={_snake_case}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text


if __name__ == "__main__":
    # The original called an undefined `stock_price`; call the function above.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {SCREAMING_SNAKE_CASE_(symbol):>8}')
| 2 | 1 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    # Fixes the mangled original: every result was bound to one name while the
    # code below read `classifier`, `train_datagen`, etc., and the layer
    # classes `ConvaD`/`MaxPoolingaD` do not exist (Conv2D / MaxPooling2D).
    classifier = models.Sequential()

    # Step 1 - Convolution
    # 64x64 is the dataset image size, 3 the RGB channels; (3, 3) is the kernel
    # (filter matrix) size.
    classifier.add(
        layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(3_2, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=1_2_8, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
    )
    # NOTE(review): fit_generator is deprecated in modern Keras; kept for parity
    # with the original script (`classifier.fit` accepts the same generators).
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(6_4, 6_4)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # NOTE(review): the sigmoid output is a float in (0, 1); these equality
    # checks against exactly 0/1 are preserved from the original but rarely
    # fire -- a 0.5 threshold is probably what was intended.
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 2 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Kruskal's MST on a fixed 9-node graph must equal the known spanning tree
    (compared order-insensitively, since `kruskal`'s edge order is unspecified).

    Fixes the mangled original, in which every local collapsed to one name and
    `kruskal` was called with an undefined `_snake_case`.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
| 2 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub whose `forward` signature mirrors a BERT-like model with the three
    canonical inputs in contiguous order.

    NOTE(review): restored the name the test below actually references
    (`FuncContiguousArgs`) and the `forward` parameter names that
    `ensure_valid_input` matches against the token dict keys.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        # Only the signature matters; the return value is never used.
        return None
class FuncNonContiguousArgs:
    """Stub whose `forward` interleaves an extra parameter between the canonical
    inputs, exercising `ensure_valid_input`'s prefix-matching behaviour.

    NOTE(review): restored the name the test below actually references
    (`FuncNonContiguousArgs`); the extra parameter is assumed to sit second,
    matching the "all before the one not provided" assertion.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        # Only the signature matters; the return value is never used.
        return None
class OnnxExportTestCase(unittest.TestCase):
    """End-to-end tests for the graph -> ONNX conversion/quantization helpers.

    NOTE(review): reconstructed from a name-mangled original in which every
    method collided on a single name and `OnnxExportTestCase.MODEL_TO_TEST`
    was referenced but never defined; names restored to match those internal
    references.
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Run `convert` into a temp dir and return the produced model path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 2 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the sum of all proper divisors of a positive integer.

    Fixes the mangled original, which read an undefined `input_num` and
    checked `isinstance(x, x)` instead of `isinstance(x, int)`.

    Raises:
        ValueError: if the input is not an int or is not positive.
    """
    if not isinstance(_snake_case , int ):
        raise ValueError('''Input must be an integer''' )
    if _snake_case <= 0:
        raise ValueError('''Input must be positive''' )
    # A proper divisor never exceeds n // 2.
    return sum(
        divisor for divisor in range(1 , _snake_case // 2 + 1 ) if _snake_case % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 1_0_0

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES (plus 2).
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100 )
def partition(number_to_partition: int ) -> set[int]:
    """Return the products of every multiset of primes summing to the input.

    Since prime factorisation is unique, the set's size equals the number of
    distinct prime partitions of `number_to_partition`.  Name restored from
    the mangled original, whose recursion already called `partition`.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret


def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 5_000 ) -> int | None:
    """Return the first value expressible as a sum of primes in more than
    `_snake_case` ways (Project Euler 77), or None if none below NUM_PRIMES."""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > _snake_case:
            return number_to_partition
    return None


if __name__ == "__main__":
    # The original printed an undefined `solution()`.
    print(f'{SCREAMING_SNAKE_CASE_() = }')
| 2 |
# Radix for the rolling hash (size of the byte alphabet).
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp(pattern: str , text: str ) -> bool:
    """Return True iff `pattern` occurs in `text`, using a Rabin-Karp
    rolling hash with an exact comparison on hash hits.

    NOTE(review): reconstructed from a mangled original whose signature
    declared two identically-named parameters (a SyntaxError) while the body
    and the test below already used `pattern`/`text`/`rabin_karp`.
    """
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False


def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Exercise rabin_karp on the standard positive/negative cases."""
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_()
| 2 | 1 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list:
    """Bead ('gravity') sort: sort a list of non-negative ints in place, ascending.

    Fixes the mangled original, which read an undefined `sequence` and checked
    `isinstance(x, x)` instead of `isinstance(x, int)`.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x , int ) or x < 0 for x in _snake_case ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    # Each pass lets excess "beads" fall one rod down; n passes suffice.
    for _ in range(len(_snake_case ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(_snake_case , _snake_case[1:] ) ):
            if rod_upper > rod_lower:
                _snake_case[i] -= rod_upper - rod_lower
                _snake_case[i + 1] += rod_upper - rod_lower
    return _snake_case


if __name__ == "__main__":
    # The original asserted against an undefined `bead_sort`.
    assert SCREAMING_SNAKE_CASE_([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert SCREAMING_SNAKE_CASE_([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 2 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# Names restored to those the tokenizer class below already references
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, BPE_TOKEN_MERGES, get_pairs);
# the mangled original bound them all to one shadowed identifier.
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """tokenizer_config_file""": """tokenizer_config.json""",
    """merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
        ),
    },
    """tokenizer_config_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
        ),
    },
    """merges_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
        ),
    },
}
# End-of-word marker appended during BPE; "@@ " marks non-final sub-tokens.
BPE_TOKEN_MERGES = """</w>"""
BPE_TOKEN_VOCAB = """@@ """


def get_pairs(word ):
    """Return the set of adjacent symbol pairs in `word` (a string or a tuple
    of symbols), e.g. get_pairs(("lo", "w")) == {("lo", "w")}."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}


class lowerCamelCase__ ( PreTrainedTokenizer):
    """Speech2Text2 (decoder-side) BPE tokenizer.

    Built from a `vocab.json` and an optional `merges.txt`; without a merges
    file the tokenizer can only decode, and `_tokenize` raises.

    NOTE(review): reconstructed from a name-mangled original whose methods
    declared duplicate parameter names (a SyntaxError) and collided on one
    method name; attribute/method names are restored from the in-class
    references (`self.encoder`, `self.decoder`, `self.bpe_ranks`, `self.cache`,
    `self.bpe`, `logger`, `get_pairs`, `VOCAB_FILES_NAMES`).
    """

    # Names consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='''utf-8''' ) as merges_handle:
                merges = merges_handle.read().split('''\n''' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder )

    def get_vocab(self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe(self, token):
        """Apply byte-pair merges to a single token; results are memoized."""
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '''''' )
        # "@@ " separates non-final sub-tokens in the flat output string.
        word = word.replace(''' ''' , '''@@ ''' )
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split text on whitespace and BPE-encode each token; requires merges."""
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ''' '''.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''''''.join(string.split('''@@ ''' ) )
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , '''w''' , encoding='''utf-8''' ) as writer:
            # Fix: the sort key lambda previously ignored its argument and read
            # an undefined `kv` name.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 2 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
# Emit INFO-level logs during checkpoint conversion so progress is visible.
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build a YolosConfig for the named checkpoint variant and attach the
    COCO-detection label maps fetched from the Hub.

    NOTE(review): reconstructed from a mangled original whose body read an
    undefined `yolos_name` and collapsed every config field to one local; the
    name is restored to match the call in the conversion entry point below.
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    # Fix: keys must be converted with int(k), not int(<whole dict>).
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def read_in_q_k_v(state_dict: dict, config, base_model: bool = False) -> None:
    """Split each timm-style fused qkv projection into separate q/k/v entries.

    Pops `blocks.{i}.attn.qkv.{weight,bias}` and writes the three slices under
    the HF `vit.encoder.layer.{i}.attention.attention.*` key layout.

    NOTE(review): reconstructed from a mangled original that declared three
    identically-named parameters (a SyntaxError); target key names follow the
    `vit.encoder.layer` scheme used by the sibling `convert_state_dict` -- 
    confirm against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Translate one timm/YOLOS checkpoint key into the HF naming scheme.

    NOTE(review): reconstructed from a mangled original whose body read an
    undefined `name`; the function name is restored to match the call inside
    `convert_state_dict` below.
    """
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name


def convert_state_dict(orig_state_dict: dict, model) -> dict:
    """Rewrite a raw YOLOS state dict in place to the HF key layout, splitting
    fused qkv projections per layer using the model's attention head size.

    NOTE(review): reconstructed from a mangled original whose locals all
    collapsed to one name; `int(key_split[2])` as the layer index assumes keys
    of the form `backbone.blocks.{i}.attn.qkv.*` -- confirm against real
    checkpoints.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img() -> "Image.Image":
    """Download the standard COCO cats test image used to sanity-check outputs.

    Fixes the mangled original, which passed undefined names for the URL and
    the `stream` flag; renamed to `prepare_img` to match the call in the
    conversion entry point.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :str , _snake_case :str , _snake_case :bool = False ) -> Dict:
    """Convert an original YOLOS checkpoint to the HF format, check its logits and
    predicted boxes against hard-coded expected values, save model + image
    processor, and optionally push both to the hub.

    NOTE(review): the four parameters are all named ``_snake_case`` (a SyntaxError
    in Python) and every intermediate result is bound to ``_A`` while later lines
    read names such as ``model``/``outputs``/``logits``/``yolos_name`` — the
    identifiers appear to have been mechanically mangled. The comments below
    describe the evident intent; the names must be restored before this runs.
    """
    _A = get_yolos_config(_snake_case )
    # load original state_dict
    _A = torch.load(_snake_case , map_location='''cpu''' )['''model''']
    # load 🤗 model
    _A = YolosForObjectDetection(_snake_case )
    model.eval()
    _A = convert_state_dict(_snake_case , _snake_case )
    model.load_state_dict(_snake_case )
    # Check outputs on an image, prepared by YolosImageProcessor
    # (the tiny variant was trained at 512px, every other variant at 800px)
    _A = 800 if yolos_name != '''yolos_ti''' else 512
    _A = YolosImageProcessor(format='''coco_detection''' , size=_snake_case )
    _A = image_processor(images=prepare_img() , return_tensors='''pt''' )
    _A = model(**_snake_case )
    _A , _A = outputs.logits, outputs.pred_boxes
    # Per-variant expected top-left 3x3 slices of logits / boxes for verification.
    _A , _A = None, None
    if yolos_name == "yolos_ti":
        _A = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        _A = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        _A = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        _A = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        _A = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        _A = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        _A = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        _A = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        _A = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        _A = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
    # Verify converted model reproduces the reference outputs within tolerance.
    assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , _snake_case , atol=1E-4 )
    # Persist model and processor to the dump folder.
    Path(_snake_case ).mkdir(exist_ok=_snake_case )
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(_snake_case )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(_snake_case )
    if push_to_hub:
        # Map internal variant names to hub repo names under the "hustvl" org.
        _A = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        _A = model_mapping[yolos_name]
        image_processor.push_to_hub(_snake_case , organization='''hustvl''' )
        model.push_to_hub(_snake_case , organization='''hustvl''' )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser is bound to `UpperCAmelCase_` but the following
    # lines read `parser`/`args` — names appear mechanically mangled and must be
    # restored for this to run.
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--yolos_name""",
        default="""yolos_s_200_pre""",
        type=str,
        help=(
            """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
            """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
        ),
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
# Generic element type used by the priority queue and graph classes below.
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the parent of the node at *position*.

    Fix: the body referenced ``position`` while the parameter was named
    ``_snake_case``, raising NameError — the parameter name is restored.
    """
    return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the left child of the node at *position*.

    Fix: the body referenced ``position`` while the parameter was named
    ``_snake_case``, raising NameError — the parameter name is restored.
    """
    return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the right child of the node at *position*.

    Fix: the body referenced ``position`` while the parameter was named
    ``_snake_case``, raising NameError — the parameter name is restored.
    """
    return (2 * position) + 2
class lowerCamelCase__ ( Generic[T]):

    """Array-backed min-priority queue (binary min-heap) keyed by integer weight.

    ``heap`` holds ``(elem, weight)`` pairs, ``position_map`` maps each element
    to its current heap index, and ``elements`` tracks the size.

    NOTE(review): the methods call module-level helpers named
    ``get_parent_position``/``get_child_left_position``/``get_child_right_position``,
    but the helpers defined above are all named ``SCREAMING_SNAKE_CASE_`` — as
    written those calls raise NameError. Likewise many assignments bind their
    targets to ``_A`` while later lines read other names (``elem``, ``weight``,
    ``curr_pos`` …); the identifiers look mechanically mangled and must be
    restored before this class is usable.
    """
    def __init__( self : Optional[int] ) -> None:
        # heap array, element->index map, and element count (intended targets;
        # see the class-level review note about `_A`).
        _A = []
        _A = {}
        _A = 0
    def __len__( self : str ) -> int:
        return self.elements
    def __repr__( self : Optional[int] ) -> str:
        return str(self.heap )
    def snake_case_ ( self : str ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        _A = self.elements
        self.elements += 1
        self._bubble_up(__lowerCAmelCase )
    def snake_case_ ( self : Tuple ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        _A , _A = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # Restore the heap invariant from the new root downwards.
            _A , _A = self.heap[0]
            self._bubble_down(__lowerCAmelCase )
        return elem
    def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Update the weight of the given key
        _A = self.position_map[elem]
        _A = (elem, weight)
        if position > 0:
            # Compare against the parent to decide which direction to restore.
            _A = get_parent_position(__lowerCAmelCase )
            _A , _A = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__lowerCAmelCase )
            else:
                self._bubble_down(__lowerCAmelCase )
        else:
            self._bubble_down(__lowerCAmelCase )
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        _A = self.position_map[elem]
        if curr_pos == 0:
            return None
        _A = get_parent_position(__lowerCAmelCase )
        _A , _A = self.heap[curr_pos]
        _A , _A = self.heap[parent_position]
        if parent_weight > weight:
            # Swap with the lighter parent and keep bubbling up recursively.
            self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
            return self._bubble_up(__lowerCAmelCase )
        return None
    def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        _A = self.position_map[elem]
        _A , _A = self.heap[curr_pos]
        _A = get_child_left_position(__lowerCAmelCase )
        _A = get_child_right_position(__lowerCAmelCase )
        if child_left_position < self.elements and child_right_position < self.elements:
            # Both children exist: sink towards the lighter one when needed.
            _A , _A = self.heap[child_left_position]
            _A , _A = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        if child_left_position < self.elements:
            _A , _A = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        else:
            return None
        if child_right_position < self.elements:
            _A , _A = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        return None
    def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
        # Swap the nodes at the given positions
        _A = self.heap[nodea_pos][0]
        _A = self.heap[nodea_pos][0]
        # Swap heap entries and keep position_map consistent with the new layout.
        _A , _A = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        _A = nodea_pos
        _A = nodea_pos
class lowerCamelCase__ ( Generic[T]):

    """Undirected weighted graph stored as a nested adjacency dict
    ``connections[u][v] = weight`` with a running node count.

    NOTE(review): both methods below are named ``snake_case_`` — the second
    definition (add-edge) shadows the first (add-node) in the class namespace,
    and it calls ``self.add_node``, which does not exist under that name.
    Assignment targets are also collapsed to ``_A`` (e.g. the add-node body
    reads the undefined ``node``). The identifiers look mechanically mangled.
    """
    def __init__( self : str ) -> None:
        # Intended: empty adjacency mapping and zero node count.
        _A = {}
        _A = 0
    def __repr__( self : str ) -> str:
        return str(self.connections )
    def __len__( self : Dict ) -> int:
        return self.nodes
    def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            _A = {}
            self.nodes += 1
    def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(__lowerCAmelCase )
        self.add_node(__lowerCAmelCase )
        # Intended: symmetric weight assignment in both adjacency directions.
        _A = weight
        _A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's minimum-spanning-tree algorithm: returns ``(dist, parent)`` maps
    over the graph's nodes.

    The annotations only resolve lazily thanks to the module's
    ``from __future__ import annotations``.

    NOTE(review): the body references ``graph``/``dist``/``parent``/
    ``priority_queue``/``node`` while every assignment target is ``_A``, and it
    instantiates ``MinPriorityQueue``/``GraphUndirectedWeighted`` — names that
    do not exist in this module (the classes above are both named
    ``lowerCamelCase__``). Identifiers appear mechanically mangled and must be
    restored before this runs.
    """
    _A = {node: maxsize for node in graph.connections}
    _A = {node: None for node in graph.connections}
    _A = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(_snake_case , _snake_case )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    _A = priority_queue.extract_min()
    _A = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            _A = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(_snake_case , dist[neighbour] )
            _A = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        _A = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                _A = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(_snake_case , dist[neighbour] )
                _A = node
    return dist, parent
| 2 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :

    """Test helper that builds small ConvNextV2 configs and random inputs and
    runs shape checks for the model, classification head, and backbone.

    NOTE(review): several assignments bind to ``_A`` while following lines read
    other names (``config_and_inputs``, ``model`` …) — the names appear
    mechanically mangled; the comments describe the evident intent.
    """
    def __init__( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : List[Any]=[10, 20, 30, 40] , __lowerCAmelCase : str=[2, 2, 3, 2] , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : List[Any]=10 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Any=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[Any]=[2, 3, 4] , __lowerCAmelCase : List[Any]=None , ) -> Optional[int]:
        # Store the tester's hyper-parameters (intended attribute names shown by
        # the reads elsewhere in the class: batch_size, image_size, ...).
        _A = parent
        _A = batch_size
        _A = image_size
        _A = num_channels
        _A = num_stages
        _A = hidden_sizes
        _A = depths
        _A = is_training
        _A = use_labels
        _A = intermediate_size
        _A = hidden_act
        _A = num_labels
        _A = initializer_range
        _A = out_features
        _A = out_indices
        _A = scope
    def snake_case_ ( self : Any ) -> List[Any]:
        # Build a random pixel batch (and labels when enabled) plus a config.
        _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.num_labels )
        _A = self.get_config()
        return config, pixel_values, labels
    def snake_case_ ( self : Tuple ) -> List[str]:
        # Assemble a ConvNextV2 config from the tester's hyper-parameters.
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def snake_case_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> List[str]:
        # Base model: check the last hidden state shape.
        _A = ConvNextVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _A = model(__lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Any ) -> Optional[Any]:
        # Classification head: check logits shape (batch, num_labels).
        _A = ConvNextVaForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _A = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def snake_case_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> int:
        # Backbone: check feature maps and channels, with and without out_features.
        _A = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _A = model(__lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        _A = None
        _A = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _A = model(__lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def snake_case_ ( self : Any ) -> Union[str, Any]:
        # Package config + inputs as the dict expected by the common test mixin.
        _A = self.prepare_config_and_inputs()
        _A , _A , _A = config_and_inputs
        _A = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def snake_case_ ( self : Optional[int] ) -> Optional[Any]:
        # Same as above but with labels included (for training tests).
        _A = self.prepare_config_and_inputs()
        _A , _A , _A = config_and_inputs
        _A = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( _A , _A , unittest.TestCase):

    """ConvNextV2 model test suite (common-model + pipeline mixin cases).

    NOTE(review): the base-class list is ``( _A , _A , unittest.TestCase)`` —
    the same undefined name twice (presumably the ModelTesterMixin and
    PipelineTesterMixin imported above) — and ``get_values(__lowerCAmelCase )``
    is called at class-method scope where that argument name is undefined.
    Identifiers appear mechanically mangled.
    """
    # Model classes / pipeline mapping under test, plus common-test feature flags.
    a__ : Optional[int] = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    a__ : Tuple = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    a__ : Optional[Any] = False
    a__ : Any = False
    a__ : Dict = False
    a__ : Optional[Any] = False
    a__ : Optional[Any] = False
    def snake_case_ ( self : Optional[int] ) -> List[str]:
        # Build the model tester and a config tester for ConvNextVaConfig.
        _A = ConvNextVaModelTester(self )
        _A = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
    def snake_case_ ( self : Optional[Any] ) -> List[Any]:
        # Run the standard config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def snake_case_ ( self : Any ) -> str:
        return
    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def snake_case_ ( self : Any ) -> Any:
        pass
    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def snake_case_ ( self : List[Any] ) -> Optional[int]:
        pass
    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def snake_case_ ( self : Union[str, Any] ) -> Tuple:
        pass
    def snake_case_ ( self : int ) -> List[Any]:
        # Training smoke test: forward + backward on every trainable class.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _A , _A = self.model_tester.prepare_config_and_inputs_with_labels()
            _A = True
            if model_class.__name__ in [
                *get_values(__lowerCAmelCase ),
                *get_values(__lowerCAmelCase ),
            ]:
                continue
            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _A = model(**__lowerCAmelCase ).loss
            loss.backward()
    def snake_case_ ( self : List[str] ) -> Any:
        # Same as above but with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _A , _A = self.model_tester.prepare_config_and_inputs_with_labels()
            _A = False
            _A = True
            if (
                model_class.__name__
                in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _A = model(**__lowerCAmelCase ).loss
            loss.backward()
    def snake_case_ ( self : Union[str, Any] ) -> str:
        # The forward signature's first argument must be `pixel_values`.
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = model_class(__lowerCAmelCase )
            _A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A = [*signature.parameters.keys()]
            _A = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    def snake_case_ ( self : str ) -> List[Any]:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def snake_case_ ( self : Tuple ) -> Any:
        # Verify hidden-state count and spatial dims, both via kwargs and config.
        def check_hidden_states_output(__lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[str] ):
            _A = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _A = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            _A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _A = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _A = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def snake_case_ ( self : Optional[int] ) -> List[Any]:
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def snake_case_ ( self : List[str] ) -> Union[str, Any]:
        # Smoke-load the first published checkpoint.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
    """Load and return the local COCO fixture image used by the integration test.

    Fix: the opened image was bound to ``_A`` while the function returned the
    undefined name ``image`` (NameError); the binding is restored.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):

    """Slow integration test: run the published convnextv2-tiny checkpoint on the
    COCO fixture image and compare the first three logits to reference values.
    """
    @cached_property
    def snake_case_ ( self : List[str] ) -> Dict:
        # Default image processor for the checkpoint (None without vision deps).
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
    @slow
    def snake_case_ ( self : Tuple ) -> int:
        # NOTE(review): results are bound to `_A` but later lines read
        # `model`/`preprocessor`/`inputs`/`outputs` — names appear mangled.
        _A = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(__lowerCAmelCase )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = preprocessor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _A = model(**__lowerCAmelCase )
        # verify the logits
        _A = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        _A = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger, SentencePiece word-boundary marker, vocab file names, hosted
# vocab URLs, and max model input sizes for the BARTpho (syllable) tokenizer.
# NOTE(review): every constant is assigned to the same name `UpperCAmelCase_`,
# so each assignment shadows the previous one, while the class below reads
# `VOCAB_FILES_NAMES` etc. — the constant names appear mechanically mangled.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):

    """BARTpho (syllable-level) tokenizer backed by SentencePiece plus a reduced
    monolingual vocabulary file mapping tokens to fairseq-style ids.

    NOTE(review): the base class is the undefined name ``_A`` (presumably
    ``PreTrainedTokenizer`` imported above), and many assignments bind to ``_A``
    while later lines read other names — identifiers appear mechanically
    mangled and must be restored for this class to work.
    """
    # Standard tokenizer class attributes: file names, pretrained maps/sizes,
    # and the model input names produced by __call__.
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Tuple = ["input_ids", "attention_mask"]
    def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        _A = vocab_file
        _A = monolingual_vocab_file
        # Load the full SentencePiece model from the vocab file.
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _A = {}
        _A = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                _A = cnt
                cnt += 1
        # Then append every token from the monolingual dict file in file order.
        with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                _A = line.strip().split()[0]
                _A = len(self.fairseq_tokens_to_ids )
        if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
            _A = len(self.fairseq_tokens_to_ids )
        # Inverse mapping id -> token.
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : Any ) -> List[Any]:
        # The SentencePiece processor is not picklable; serialize its proto instead.
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        _A = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _A = {}
        # Rebuild the SentencePiece processor from the serialized proto.
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        # Build model inputs: <s> A </s> (pair: <s> A </s></s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
    def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        # Token type ids are all zeros for BARTpho (single segment model).
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        # Vocab size is the size of the reduced (fairseq) vocabulary.
        return len(self.fairseq_ids_to_tokens )
    def snake_case_ ( self : Dict ) -> Optional[Any]:
        # Full token -> id mapping including added tokens.
        _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
        # Tokenize raw text with SentencePiece.
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
        # Token -> id, falling back to the unk id for out-of-vocab tokens.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
        # id -> token via the inverse fairseq map.
        return self.fairseq_ids_to_tokens[index]
    def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        # Join sub-tokens and turn SentencePiece underline markers into spaces.
        _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string
    def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        # Save both the SentencePiece model and the monolingual dict into
        # `save_directory`, copying the originals when they exist on disk.
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # Vocab file only exists in memory: dump the serialized proto.
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            # Rewrite the monolingual dict from the in-memory fairseq vocab.
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__lowerCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 2 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
# Module logger (transformers-style, keyed by module path).
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : str ) -> None:
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 2 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( graph :dict , start :str ) -> set[str]:
    """Iterative depth-first search.

    Returns the set of vertices reachable from *start* in *graph* (an
    adjacency-list mapping node -> list of neighbours).

    Fix: the original signature declared two parameters both named
    ``_snake_case`` (a SyntaxError) and bound both locals to ``_A``, leaving
    ``stack``/``v`` undefined. Names are reconstructed from the surviving
    control flow.
    """
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
# Example adjacency list used by the doctest / demo run below.
# NOTE(review): "D" lists itself as a neighbour (self-loop) — presumably a typo
# for "C"; and the __main__ block calls `depth_first_search`, a name that does
# not exist in this module (the function above is `SCREAMING_SNAKE_CASE_`).
UpperCAmelCase_ = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(depth_first_search(G, """A"""))
| 2 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( _A):

    """BLIP processor: wraps a BLIP image processor and a BERT tokenizer into a
    single callable that accepts images and/or text.

    NOTE(review): the base class is the undefined name ``_A`` (presumably
    ``ProcessorMixin`` imported above), and several assignments bind to ``_A``
    while later lines read other names (``text_encoding``,
    ``encoding_image_processor``) — identifiers appear mechanically mangled.
    """
    # Processor attribute declarations consumed by the base mixin.
    a__ : Tuple = ["image_processor", "tokenizer"]
    a__ : Optional[Any] = "BlipImageProcessor"
    a__ : Any = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
        # Intended: disable token_type_ids on the tokenizer, init the mixin,
        # and default the current processor to the image processor.
        _A = False
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )
        _A = self.image_processor
    def __call__( self : List[Any] , __lowerCAmelCase : ImageInput = None , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : Union[str, Any] , ) -> BatchEncoding:
        # Dispatch on which modalities were supplied: text-only, image-only,
        # or image+text (merging the tokenizer output into the image encoding).
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            _A = self.tokenizer
            _A = self.tokenizer(
                text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
            return text_encoding
        # add pixel_values
        _A = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
        if text is not None:
            _A = self.tokenizer(
                text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
        else:
            _A = None
        if text_encoding is not None:
            encoding_image_processor.update(__lowerCAmelCase )
        return encoding_image_processor
    def snake_case_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
    def snake_case_ ( self : int , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[int] ) -> Tuple:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
    @property
    def snake_case_ ( self : Optional[int] ) -> Dict:
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        _A = self.tokenizer.model_input_names
        _A = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 2 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 1 |
from heapq import heappop, heappush
import numpy as np
def SCREAMING_SNAKE_CASE_(
    grid: np.ndarray,
    source: tuple,
    destination: tuple,
    allow_diagonal: bool,
) -> tuple:
    """Dijkstra shortest path on a binary occupancy grid.

    Cells whose value is 1 are traversable and every step costs 1.

    Args:
        grid: 2-D numpy array of 0/1 cells.
        source: (row, col) start cell.
        destination: (row, col) goal cell.
        allow_diagonal: when True, also expand the four diagonal neighbours.

    Returns:
        (distance, path) where ``path`` runs from source to destination
        inclusive, or (numpy.inf, []) when the goal is unreachable.
    """
    # Fix: the obfuscated original reused ``_snake_case`` for all four
    # parameters (SyntaxError) and collapsed every local to ``_A``.
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    # matrix[r, c] holds the best known distance to cell (r, c).
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    # predecessors[r, c] holds the cell we came from (object dtype -> tuples).
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # A value of 1 marks a free cell; relax on a shorter route.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__(PretrainedConfig):
    """Configuration for an XLNet model.

    Stores the hyper-parameters used to instantiate the model; defaults
    correspond to the xlnet-large-cased architecture.
    """

    # Fix: the obfuscated original inherited from the undefined name ``_A``
    # and bound all three class attributes to the same name ``a__``, so the
    # PretrainedConfig machinery (model_type / attribute_map) never worked.
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct the config; see the class docstring for parameter roles."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet has no fixed sequence-length limit, so -1 is reported.
        # Fix: the property was renamed to ``snake_case_`` by the obfuscation,
        # which broke the ``@max_position_embeddings.setter`` decorator below
        # (NameError at class-definition time).
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 2 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list, target: int) -> list:
    """Two-pointer search for two indices of a *sorted* list summing to target.

    Args:
        nums: list of integers sorted in ascending order.
        target: desired pair sum.

    Returns:
        [i, j] with ``nums[i] + nums[j] == target`` and i < j, or [] when no
        such pair exists (including for an empty input).
    """
    # Fix: the obfuscated original assigned both pointers to ``_A`` and then
    # read the never-defined names ``i``/``j``/``nums`` (NameError).
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum -> advance the left pointer
        else:
            j -= 1  # need a smaller sum -> retreat the right pointer
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original printed ``two_pointer(...)`` which is undefined here.
    print(f"{SCREAMING_SNAKE_CASE_([2, 7, 11, 15], 9) = }")
| 2 |
def SCREAMING_SNAKE_CASE_(_snake_case: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string, two digits per byte."""
    # Fix: the original applied ``hex`` to the whole bytes object instead of
    # the loop variable ``byte`` (TypeError at runtime).
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in _snake_case)
def SCREAMING_SNAKE_CASE_(_snake_case: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    Raises:
        ValueError: when the length is odd or the string contains characters
            outside the uppercase RFC 3548 base16 alphabet.
    """
    # Fix: the body read the undefined name ``data`` while the parameter was
    # obfuscated to ``_snake_case``; alias it to keep the public signature.
    data = _snake_case
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
def SCREAMING_SNAKE_CASE_(_snake_case: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string, two digits per byte."""
    # Fix: the original applied ``hex`` to the whole bytes object instead of
    # the loop variable ``byte`` (TypeError at runtime).
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in _snake_case)
def SCREAMING_SNAKE_CASE_(_snake_case: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    Raises:
        ValueError: when the length is odd or the string contains characters
            outside the uppercase RFC 3548 base16 alphabet.
    """
    # Fix: the body read the undefined name ``data`` while the parameter was
    # obfuscated to ``_snake_case``; alias it to keep the public signature.
    data = _snake_case
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 |
def SCREAMING_SNAKE_CASE_(_snake_case: list) -> bool:
    """Return True when the list forms an arithmetic series (constant step).

    Raises:
        ValueError: when the input is not a list, or is an empty list.
    """
    # Fix: the original tested ``isinstance(x, x)`` and read the undefined
    # names ``series``/``common_diff`` (the assignments all went to ``_A``).
    series = _snake_case
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def SCREAMING_SNAKE_CASE_(_snake_case: list) -> float:
    """Return the arithmetic mean of the values in the list.

    Raises:
        ValueError: when the input is not a list, or is an empty list.
    """
    # Fix: the original tested ``isinstance(x, x)`` and accumulated into the
    # undefined name ``answer``; use the builtin ``sum`` instead.
    series = _snake_case
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    return sum(series) / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
# Size of the rolling-hash alphabet (one slot per byte value).
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
# NOTE(review): the original file bound *both* constants to the single name
# ``UpperCAmelCase_`` (the second assignment clobbered the first); the name is
# kept with its final value for backward compatibility.
UpperCAmelCase_ = modulus


def SCREAMING_SNAKE_CASE_(pattern: str, text: str) -> bool:
    """Rabin-Karp substring search: True iff ``pattern`` occurs in ``text``.

    A rolling hash lets each window of the text be re-hashed in O(1); the
    actual strings are compared only when the hashes collide.
    """
    # Fix: the obfuscated original declared two parameters both named
    # ``_snake_case`` (SyntaxError) and bound every hash local to ``_A``.
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Smoke-test the module-level Rabin-Karp matcher on known fixtures.

    Fix: the original named this function ``SCREAMING_SNAKE_CASE_`` (shadowing
    the matcher it was supposed to exercise) and called the undefined names
    ``rabin_karp`` / ``test_rabin_karp``.
    """
    # Test 1)
    pattern = "abc1abc12"
    text_with_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_without_match = "alskfjaldsk23adsfabcabc"
    assert SCREAMING_SNAKE_CASE_(pattern, text_with_match)
    assert not SCREAMING_SNAKE_CASE_(pattern, text_without_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert SCREAMING_SNAKE_CASE_(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert SCREAMING_SNAKE_CASE_(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert SCREAMING_SNAKE_CASE_(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert SCREAMING_SNAKE_CASE_(pattern, text)
    pattern = "Lue"
    assert not SCREAMING_SNAKE_CASE_(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 2 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE_(number_of_qubits: int = 3) -> "qiskit.result.counts.Counts":
    """Build, simulate and measure an n-qubit quantum Fourier transform circuit.

    Args:
        number_of_qubits: circuit width; must be an exact integer in (0, 10].

    Returns:
        Measurement counts from a 10000-shot ``qasm_simulator`` run.

    Raises:
        TypeError: if a string is passed instead of a number.
        ValueError: for non-positive, non-integer or >10 qubit counts.
    """
    # Fix: the obfuscated original bound every local to ``_A`` while the body
    # read the intended names (``number_of_qubits``, ``counter``, ...), and
    # its type guard compared the argument against itself.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the highest remaining qubit, then controlled phases.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order to match the conventional QFT output ordering.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{SCREAMING_SNAKE_CASE_(3)}"
    )
| 2 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class lowerCamelCase__(unittest.TestCase):
    """Integration tests for the Tatoeba -> Marian model converter.

    Fix: the original skip condition referenced the undefined name ``_A``
    (DEFAULT_REPO is the imported constant it stood for), and all three
    members were renamed to ``snake_case_`` so ``self.resolver`` did not
    exist and the later definitions clobbered the earlier ones.
    """

    @cached_property
    def resolver(self):
        # One converter per test class, writing into a throw-away directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run skips the actual download/conversion and only builds metadata.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def SCREAMING_SNAKE_CASE_(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by ``key``.

    ``key`` is a dotted attribute path; ``weight_type`` selects which tensor
    on the resolved module receives the data (weight / weight_g / weight_v /
    bias), or the resolved object itself when None. ``full_name`` is the
    source checkpoint name, used only for the shape-mismatch message and log.
    """
    # Fix: the obfuscated original never bound ``hf_pointer``/``hf_shape``
    # (every assignment went to ``_A``) and collapsed all five assignment
    # branches to the same meaningless statement.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :int ) -> Any:
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_A = True
else:
for key, mapped_key in MAPPING.items():
_A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_A = True
if "*" in mapped_key:
_A = name.split(_snake_case )[0].split('''.''' )[-2]
_A = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight" in name:
_A = '''weight'''
elif "bias" in name:
_A = '''bias'''
else:
_A = None
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[str] , _snake_case :List[str] , _snake_case :Optional[int] , _snake_case :List[Any] ) -> Any:
_A = full_name.split('''conv_layers.''' )[-1]
_A = name.split('''.''' )
_A = int(items[0] )
_A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Dict ) -> Tuple:
_A = SEWConfig()
if is_finetuned:
_A = model.wav_encoder.wav_model.cfg
else:
_A = model.cfg
_A = fs_config.conv_bias
_A = eval(fs_config.conv_feature_layers )
_A = [x[0] for x in conv_layers]
_A = [x[1] for x in conv_layers]
_A = [x[2] for x in conv_layers]
_A = '''gelu'''
_A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_A = 0.0
_A = fs_config.activation_fn.name
_A = fs_config.encoder_embed_dim
_A = 0.02
_A = fs_config.encoder_ffn_embed_dim
_A = 1E-5
_A = fs_config.encoder_layerdrop
_A = fs_config.encoder_attention_heads
_A = fs_config.conv_pos_groups
_A = fs_config.conv_pos
_A = len(_snake_case )
_A = fs_config.encoder_layers
_A = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_A = model.cfg
_A = fs_config.final_dropout
_A = fs_config.layerdrop
_A = fs_config.activation_dropout
_A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_A = fs_config.attention_dropout
_A = fs_config.dropout_input
_A = fs_config.dropout
_A = fs_config.mask_channel_length
_A = fs_config.mask_channel_prob
_A = fs_config.mask_length
_A = fs_config.mask_prob
_A = '''Wav2Vec2FeatureExtractor'''
_A = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Union[str, Any] , _snake_case :Optional[Any]=None , _snake_case :Optional[int]=None , _snake_case :Dict=True ) -> List[Any]:
if is_finetuned:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_A = SEWConfig.from_pretrained(_snake_case )
else:
_A = convert_config(model[0] , _snake_case )
_A = model[0].eval()
_A = True if config.feat_extract_norm == '''layer''' else False
_A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
if is_finetuned:
if dict_path:
_A = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.eos_index
_A = len(target_dict.symbols )
_A = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , _snake_case )
_A = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
_A = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
_A = SEWForCTC(_snake_case )
else:
_A = SEWModel(_snake_case )
feature_extractor.save_pretrained(_snake_case )
recursively_load_weights(_snake_case , _snake_case , _snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 2 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : int=3 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : int=36 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : int=6 , __lowerCAmelCase : Tuple=6 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=10_00 , ) -> List[str]:
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = patch_size
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = coordinate_size
_A = shape_size
_A = num_labels
_A = num_choices
_A = scope
_A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_A = text_seq_length
_A = (image_size // patch_size) ** 2 + 1
_A = self.text_seq_length + self.image_seq_length
def snake_case_ ( self : List[Any] ) -> Dict:
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_A = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_A = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A = bbox[i, j, 3]
_A = bbox[i, j, 1]
_A = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_A = bbox[i, j, 2]
_A = bbox[i, j, 0]
_A = tmp_coordinate
_A = tf.constant(__lowerCAmelCase )
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.text_seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_A = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
_A = TFLayoutLMvaModel(config=__lowerCAmelCase )
# text + image
_A = model(__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , training=__lowerCAmelCase , )
_A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_A = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_A = model({'''pixel_values''': pixel_values} , training=__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case_ ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ) -> List[str]:
_A = self.num_labels
_A = TFLayoutLMvaForSequenceClassification(config=__lowerCAmelCase )
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
_A = self.num_labels
_A = TFLayoutLMvaForTokenClassification(config=__lowerCAmelCase )
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Optional[int]:
_A = 2
_A = TFLayoutLMvaForQuestionAnswering(config=__lowerCAmelCase )
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , training=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : Tuple ) -> Union[str, Any]:
_A = self.prepare_config_and_inputs()
((_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs
_A = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
    """Model tests for TF LayoutLMv3: the base model plus the question-answering,
    sequence-classification and token-classification heads (only exercised when
    TensorFlow is available)."""

    # Model classes handed to the common test mixins.
    a__ : Any = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-name -> model-class mapping consumed by the pipeline test mixin.
    a__ : str = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Feature toggles read by the common test mixins; their exact meaning is
    # defined by the mixin base classes (not visible in this file).
    a__ : Tuple = False
    a__ : List[Any] = False
    a__ : int = False

    def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
        # Unconditionally allow every pipeline test for this model family.
        return True

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=False ) -> dict:
        """Copy ``inputs_dict`` for a given model class and, when requested,
        add dummy label tensors of the shape each head expects."""
        _A = copy.deepcopy(__lowerCAmelCase )
        if model_class in get_values(__lowerCAmelCase ):
            # Multiple-choice style models: tile every tensor input across the
            # num_choices axis.
            _A = {
                k: tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(__lowerCAmelCase , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                _A = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                # Question answering: one start and one end position per sample.
                _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                _A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                # Token classification: one label per text token.
                _A = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def snake_case_ ( self : Any ) -> List[str]:
        # Shared model tester plus the generic ConfigTester harness.
        _A = TFLayoutLMvaModelTester(self )
        _A = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )

    def snake_case_ ( self : Dict ) -> Dict:
        # Run the standard configuration sanity checks.
        self.config_tester.run_common_tests()

    def snake_case_ ( self : str ) -> int:
        """Check each model produces a loss of the expected size through every
        calling convention: keyword args, masked labels, a dict and a tuple."""
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _A = model_class(__lowerCAmelCase )
            if getattr(__lowerCAmelCase , '''hf_compute_loss''' , __lowerCAmelCase ):
                # The number of elements in the loss should be the same as the number of elements in the label
                _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _A = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__lowerCAmelCase )[0]
                ]
                _A = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _A = prepared_for_class.pop('''input_ids''' )
                _A = model(__lowerCAmelCase , **__lowerCAmelCase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _A = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    _A = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # Replace one position with the ignore index and make
                        # sure the loss is still well-formed and finite.
                        _A = -1_00
                        _A = tf.convert_to_tensor(__lowerCAmelCase )
                        _A = model(__lowerCAmelCase , **__lowerCAmelCase )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _A = model(__lowerCAmelCase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                _A = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                # Get keys that were added with the _prepare_for_class function
                _A = prepared_for_class.keys() - inputs_dict.keys()
                _A = inspect.signature(model.call ).parameters
                _A = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                _A = {0: '''input_ids'''}
                for label_key in label_keys:
                    _A = signature_names.index(__lowerCAmelCase )
                    _A = label_key
                _A = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                _A = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    _A = prepared_for_class[value]
                _A = tuple(__lowerCAmelCase )
                # Send to model
                _A = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def snake_case_ ( self : Tuple ) -> Optional[int]:
        # Forward pass of the base model with the tester's fabricated inputs.
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Dict ) -> int:
        # Same forward check under every position-embedding variant.
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _A = type
            self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : List[Any] ) -> Dict:
        # Sequence-classification head.
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Dict ) -> List[str]:
        # Token-classification head.
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : List[Any] ) -> Optional[Any]:
        # Question-answering head.
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    @slow
    def snake_case_ ( self : Dict ) -> Union[str, Any]:
        # Smoke-test loading the first published checkpoint from the hub.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = TFLayoutLMvaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
    """Open and return the shared COCO test-fixture image.

    Returns:
        The ``PIL.Image.Image`` loaded from the repository's test fixtures.
    """
    # FIX: the original assigned the opened image to ``_A`` but then returned
    # the undefined name ``image`` (NameError at call time); return the value
    # that was actually opened.
    _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return _A
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """Integration test: run the pretrained TF LayoutLMv3 base model on the
    fixture image and compare the hidden states against reference values."""

    @cached_property
    def snake_case_ ( self : List[Any] ) -> Union[str, Any]:
        # Image processor with OCR toggled via ``apply_ocr`` (bboxes and token
        # ids are supplied manually in the test); None when vision extras are
        # not installed.
        # NOTE(review): ``__lowerCAmelCase`` is not defined in this scope —
        # presumably a mangled ``False``; confirm against the original file.
        return LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase ) if is_vision_available() else None

    @slow
    def snake_case_ ( self : Union[str, Any] ) -> Any:
        """Forward the fixture image through ``microsoft/layoutlmv3-base`` and
        check output shape and the first 3x3 block of the last hidden state."""
        _A = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' ).pixel_values
        _A = tf.constant([[1, 2]] )
        # One batch of two tokens, each with a 4-value bounding box.
        _A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        _A = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
        # verify the logits
        _A = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape , __lowerCAmelCase )
        _A = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 2 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Use the real PIL Image when vision support is installed; otherwise define a
# no-op stand-in so module-level references to ``Image`` still resolve.
if is_vision_available():
    from PIL import Image
else:
    class lowerCamelCase__ :
        """Inert stand-in for ``PIL.Image`` used when Pillow is unavailable."""

        @staticmethod
        def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any:
            # Accept any call signature and do nothing.
            pass
@is_pipeline_test
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Tests for the zero-shot image-classification pipeline, using a tiny
    random CLIP checkpoint (fast) and the full CLIP base model (slow), under
    both the PyTorch and TensorFlow backends."""

    @require_torch
    def snake_case_ ( self : Tuple ) -> Tuple:
        # Tiny random model: every candidate gets ~1/3 probability, so only
        # membership among the plausible orderings can be asserted.
        _A = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__lowerCAmelCase ) , [
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
            ] , )
        # Batched call: five copies of the image, processed two at a time.
        _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
            ] , )

    @require_tf
    def snake_case_ ( self : int ) -> Optional[int]:
        # Same tiny-model checks against the TensorFlow backend.
        _A = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
            ] , )

    @slow
    @require_torch
    def snake_case_ ( self : Optional[int] ) -> int:
        # Full CLIP model: scores are deterministic enough to assert exactly.
        _A = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def snake_case_ ( self : Optional[int] ) -> Dict:
        # Same full-model checks against the TensorFlow backend.
        _A = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
| 2 | 1 |
import numpy as np
def SCREAMING_SNAKE_CASE_ ( _snake_case :np.array ) -> np.array:
    """Elementwise hyperbolic tangent activation.

    Computes tanh(x) through the logistic identity ``2 / (1 + e^(-2x)) - 1``.

    Args:
        _snake_case: array of real input values.

    Returns:
        Array of the same shape with values in the open interval (-1, 1).
    """
    # FIX: the original body referenced an undefined name ``vector``
    # (NameError on every call); use the actual parameter instead.
    return (2 / (1 + np.exp(-2 * _snake_case ))) - 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 2 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Tests for ``AlignProcessor``: save/load round-trips and parity with its
    underlying BERT tokenizer and EfficientNet image processor."""

    def snake_case_ ( self : Tuple ) -> Optional[int]:
        # Build a throwaway directory containing a toy wordpiece vocab and an
        # image-processor config that the fixtures below load from.
        _A = tempfile.mkdtemp()
        _A = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _A = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        _A = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
        # Slow (Python) tokenizer built from the toy vocab written in setUp.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
        # Fast (Rust) tokenizer over the same files.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
        # Image processor loaded from the config written in setUp.
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        # Remove the temporary fixture directory.
        shutil.rmtree(self.tmpdirname )

    def snake_case_ ( self : int ) -> Optional[Any]:
        # One random 3x30x400 uint8 image, converted channel-last for PIL.
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case_ ( self : Dict ) -> List[str]:
        """Round-trip save_pretrained/from_pretrained with both slow and fast
        tokenizers and verify the reloaded components match the originals."""
        _A = self.get_tokenizer()
        _A = self.get_rust_tokenizer()
        _A = self.get_image_processor()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : List[Any] ) -> List[str]:
        # from_pretrained with extra kwargs overrides tokenizer and
        # image-processor settings on load.
        _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        _A = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> List[Any]:
        # processor(images=...) output must match the bare image processor's.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        _A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        # processor(text=...) output must match the bare tokenizer's.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = processor(text=__lowerCAmelCase )
        _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case_ ( self : List[str] ) -> Any:
        # Joint text+image call yields the combined feature keys; calling with
        # no input must raise.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()

    def snake_case_ ( self : Optional[Any] ) -> str:
        # batch_decode is forwarded verbatim to the tokenizer.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _A = processor.batch_decode(__lowerCAmelCase )
        _A = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> str:
        # The processor's advertised model_input_names match its output keys.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__ ( _A):
    """Deprecated alias of the MobileViT image processor.

    Emits a ``FutureWarning`` on construction and otherwise forwards all
    arguments unchanged to the parent class.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # FIX: the original signature reused a single mangled name for both
        # ``*args`` and ``**kwargs`` (a SyntaxError) and passed that name as
        # the warning category; restore distinct names and the FutureWarning
        # category conventionally used for deprecation notices.
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 2 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
UpperCAmelCase_ = logging.get_logger(__name__)
# Canonical checkpoint name -> URL of its hosted configuration file.
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( _A):
    """Configuration for the OpenAI GPT model.

    Stores the hyper-parameters used to instantiate the model; the defaults
    correspond to the values visible in the original signature.
    """

    a__ : int = "openai-gpt"
    # Aliases mapping the shared configuration attribute names onto this
    # model's native hyper-parameter names.
    a__ : Dict = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,
        n_positions=5_12,
        n_embd=7_68,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on the instance and forward the
        remaining keyword arguments to the base configuration class.

        FIX: the original ``__init__`` declared every parameter under the same
        mangled name (``duplicate argument`` SyntaxError) and bound each value
        to a throwaway local instead of an attribute; restore named parameters
        and conventional ``self.<attr>`` assignments.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: the configuration symbols are always importable; the
# modeling symbols are registered only when torch is installed.
UpperCAmelCase_ = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    UpperCAmelCase_ = [
        """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """WavLMForAudioFrameClassification""",
        """WavLMForCTC""",
        """WavLMForSequenceClassification""",
        """WavLMForXVector""",
        """WavLMModel""",
        """WavLMPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # At runtime the module object is replaced by a lazy proxy.
    # NOTE(review): ``_import_structure`` is referenced here although the
    # structure above was bound to ``UpperCAmelCase_`` — confirm against the
    # original file.
    UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 2 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Helper that fabricates DeformableDETR image-processor settings and
    computes the output dimensions expected from shortest-edge resizing."""

    def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        _A = parent
        _A = batch_size
        _A = num_channels
        _A = min_resolution
        _A = max_resolution
        _A = do_resize
        _A = size
        _A = do_normalize
        _A = image_mean
        _A = image_std
        _A = do_rescale
        _A = rescale_factor
        _A = do_pad

    def snake_case_ ( self : Optional[int] ) -> Any:
        # Settings dict in the shape expected by the image-processor ctor.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict:
        """Compute the (height, width) the processor should produce for a
        single image, or — when ``batched`` — the per-dimension maximum over
        the (padded) batch."""
        if not batched:
            _A = image_inputs[0]
            if isinstance(__lowerCAmelCase , Image.Image ):
                _A , _A = image.size
            else:
                _A , _A = image.shape[1], image.shape[2]
            # Scale so the shorter side lands on size["shortest_edge"],
            # preserving aspect ratio.
            if w < h:
                _A = int(self.size['''shortest_edge'''] * h / w )
                _A = self.size['''shortest_edge''']
            elif w > h:
                _A = self.size['''shortest_edge''']
                _A = int(self.size['''shortest_edge'''] * w / h )
            else:
                _A = self.size['''shortest_edge''']
                _A = self.size['''shortest_edge''']
        else:
            _A = []
            for image in image_inputs:
                _A , _A = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Padded batch: take the maximum along each dimension.
            _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
            _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[int] ) -> Any:
_A = DeformableDetrImageProcessingTester(self )
@property
def snake_case_ ( self : Union[str, Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
def snake_case_ ( self : List[str] ) -> int:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
def snake_case_ ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Tuple ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self : Optional[Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
    """Verify DeformableDetrImageProcessor output for a COCO panoptic-annotated image.

    NOTE(review): locals restored from the `_A` / `__lowerCAmelCase` obfuscation.
    """
    # prepare image, target and masks_path
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
        target = json.loads(f.read())
    target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
    masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
    # encode them
    image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
    encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
    # verify pixel values
    expected_shape = torch.Size([1, 3, 800, 1066])
    self.assertEqual(encoding["pixel_values"].shape, expected_shape)
    expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
    self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
    # verify area
    expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
    self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
    # verify boxes
    expected_boxes_shape = torch.Size([6, 4])
    self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
    expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
    self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
    # verify image_id
    expected_image_id = torch.tensor([39769])
    self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
    # verify is_crowd
    expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
    self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
    # verify class_labels
    expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
    self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
    # verify masks
    expected_masks_sum = 822873
    self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
    # verify orig_size
    expected_orig_size = torch.tensor([480, 640])
    self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
    # verify size
    expected_size = torch.tensor([800, 1066])
    self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 2 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): the original bound every constant below to the same name
# (`UpperCAmelCase_`), leaving the class-level references (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, ...) dangling; the referenced names are restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

# Maximum input lengths for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class lowerCamelCase__ ( _A):
    """Fast ConvBERT tokenizer (WordPiece, backed by HuggingFace *tokenizers*).

    NOTE(review): the original collapsed all five class attributes into one name
    (`a__`) and declared duplicate `__lowerCAmelCase` parameters (a SyntaxError);
    conventional attribute/parameter names are restored. The three helper
    methods keep the file's `snake_case_` name, so later definitions shadow
    earlier ones — confirm against the upstream file, where they are
    `build_inputs_with_special_tokens`, `create_token_type_ids_from_sequences`
    and `save_vocabulary`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when its serialized options disagree
        # with the arguments passed here (e.g. checkpoint saved with other casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def snake_case_(self, token_ids_0, token_ids_1=None):
        """Build model inputs: `[CLS] A [SEP]` (+ `B [SEP]` for a sequence pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def snake_case_(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def snake_case_(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary into `save_directory`; return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 2 |
# NOTE(review): the original rebound one name (`UpperCAmelCase_`) four times,
# leaving `red`, `white`, `blue` and `colors` undefined; restored here.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list:
if not sequence:
return []
if len(_snake_case ) == 1:
return list(_snake_case )
_A = 0
_A = len(_snake_case ) - 1
_A = 0
while mid <= high:
if sequence[mid] == colors[0]:
_A , _A = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_A , _A = sequence[high], sequence[mid]
high -= 1
else:
_A = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(_snake_case )
return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    # The sorter above is named SCREAMING_SNAKE_CASE_ in this file; the
    # original called an undefined `dutch_national_flag_sort`.
    print(f"{SCREAMING_SNAKE_CASE_(unsorted)}")
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level setup for the UniSpeechSat configuration below.
# NOTE(review): both assignments bind the same name `UpperCAmelCase_`, so the
# logger handle is immediately overwritten by the checkpoint map — looks like
# mechanical renaming damage (upstream presumably `logger` and
# `UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP`); confirm before relying on it.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
    """microsoft/unispeech-sat-base-100h-libri-ft""": (
        """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowerCamelCase__ ( _A):
    """Configuration class for UniSpeechSat models.

    NOTE(review): the original `__init__` declared dozens of duplicate
    `__lowerCAmelCase` parameters (a SyntaxError) and bound every attribute to
    `_A`; conventional parameter/attribute names are restored from the default
    values, which the validation below and the stride property rely on.
    """

    a__ : Union[str, Any] = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def snake_case_ ( self : Tuple ) -> Dict:
        """Total stride of the conv feature extractor (input samples per output frame)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 2 |
import itertools
import math
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool:
    """Return True iff ``_snake_case`` is prime, by 6k±1 trial division.

    NOTE(review): the original mixed the parameter with an undefined name
    ``number``; the parameter is now used throughout.
    """
    if 1 < _snake_case < 4:
        # 2 and 3 are primes
        return True
    elif _snake_case < 2 or _snake_case % 2 == 0 or _snake_case % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(_snake_case) + 1), 6):
        if _snake_case % i == 0 or _snake_case % (i + 2) == 0:
            return False
    return True
def SCREAMING_SNAKE_CASE_ ( ):
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    NOTE(review): the original referenced undefined names (`is_prime`,
    `_snake_case`, `num`); a primality check is nested here so the block does
    not depend on sibling definitions that all share one shadowed name.
    """
    def _is_prime(candidate: int) -> bool:
        # Same 6k±1 trial division as the primality helper above.
        if 1 < candidate < 4:
            return True
        if candidate < 2 or candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(candidate) + 1), 6):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num):
            yield num
        num += 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int:
    """Return the ``_snake_case``-th prime number (Project Euler problem 7).

    NOTE(review): the original sliced an undefined `prime_generator`; the
    candidate loop is inlined so the block is self-contained.
    """
    count = 0
    candidate = 1
    while count < _snake_case:
        candidate += 1
        # Trial division up to sqrt(candidate).
        if candidate >= 2 and all(candidate % d for d in range(2, int(math.sqrt(candidate)) + 1)):
            count += 1
    return candidate
if __name__ == "__main__":
    # The solver above is named SCREAMING_SNAKE_CASE_ in this file; the
    # original called an undefined `solution`.
    print(f"{SCREAMING_SNAKE_CASE_() = }")
| 2 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( a_list :list[int] , item :int ) -> bool:
    """Return True iff ``item`` occurs in the sorted list ``a_list`` (recursive binary search).

    NOTE(review): the original declared two parameters both named `_snake_case`
    (a SyntaxError) while the body read `a_list`/`item` and recursed into an
    undefined `binary_search`; names are reconciled and the recursion targets
    this function.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return SCREAMING_SNAKE_CASE_(a_list[:midpoint], item)
    else:
        return SCREAMING_SNAKE_CASE_(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    # The search function above is named SCREAMING_SNAKE_CASE_ in this file;
    # the original called an undefined `binary_search` with undefined locals.
    not_str = "" if SCREAMING_SNAKE_CASE_(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 2 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
    """For an `if not is_xxx_available()` line, return the sorted backend names
    joined with "_and_"; return None if the line is not a backend test.

    NOTE(review): the original assigned the match list to `_A` but sorted and
    joined an undefined `backends`; the local is restored.
    """
    if _re_test_backend.search(_snake_case) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(_snake_case)]
    backends.sort()
    return "_and_".join(backends)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
    """Parse a transformers-style `__init__.py` and return two dicts mapping a
    backend name ("none" for backend-free) to the objects it declares: one from
    the `_import_structure` half, one from the `TYPE_CHECKING` half. Returns
    None for traditional inits without `_import_structure`.

    NOTE(review): the original bound every local to `_A` while the rest of the
    body read `lines`, `line_index`, `objects`, etc.; those locals are restored.
    """
    with open(_snake_case, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any:
def find_duplicates(_snake_case :Any ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def SCREAMING_SNAKE_CASE_ ( ) -> int:
    """Walk the transformers source tree and raise ValueError if any
    `__init__.py` has mismatched `_import_structure` / TYPE_CHECKING halves.

    NOTE(review): the original walked an undefined `_snake_case` and collapsed
    its locals into `_A`; the path and locals are restored (path kept local so
    this block is self-contained).
    """
    path_to_transformers = "src/transformers"
    failures = []
    for root, _, files in os.walk(path_to_transformers):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
_A = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
_A = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
_A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
# Submodules deliberately not registered in the main init.
# NOTE(review): the original bound this list to `UpperCAmelCase_` while the
# check below reads `IGNORE_SUBMODULES`; the referenced name is restored.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    """Raise ValueError if any on-disk transformers submodule is missing from
    the keys of `_import_structure` in the main `__init__.py`.

    NOTE(review): the original passed the undefined `_snake_case` everywhere;
    the path is restored (kept local so this block is self-contained) and the
    `_A` locals renamed to what the body reads.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    path_to_transformers = "src/transformers"
    transformers = direct_transformers_import(path_to_transformers)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(path_to_transformers, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` and `check_submodules` are not defined
    # under these names in this file (every function above is named
    # SCREAMING_SNAKE_CASE_), so this entry point raises NameError as written —
    # looks like mechanical renaming damage; confirm against the upstream
    # `utils/check_inits.py` script.
    check_all_inits()
    check_submodules()
| 2 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def SCREAMING_SNAKE_CASE_ ( repo_id :Optional[Any] , path :Dict , revision :Optional[int] ) -> List[Any]:
    """hf_hub_url must build the dataset resolve URL with the path URL-quoted
    and the revision defaulting to "main".

    NOTE(review): the original declared three parameters all named `_snake_case`
    (a SyntaxError); pytest also requires the parameter names to match the
    parametrize ids, so `repo_id`/`path`/`revision` are restored.
    """
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 2 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
# Optional-dependency imports: only pulled in when the corresponding extra
# (vision / TensorFlow / PyTorch) is installed.
if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image
if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
    import torch
    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
# NOTE(review): presumably the module logger (upstream names this `logger`);
# confirm against the upstream pipeline file.
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_A)
class lowerCamelCase__ ( _A):
    """Image-to-text pipeline: predicts a caption/answer string for an image.

    NOTE(review): the original declared duplicate `__lowerCAmelCase` parameters
    (a SyntaxError) and collapsed locals into `_A`; conventional names are
    restored. Method names are kept as in this file — several share the name
    `snake_case_`, so later definitions shadow earlier ones; upstream these are
    `_sanitize_parameters`, `preprocess`, `_forward` and `postprocess`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def snake_case_(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) param dicts."""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs) -> Union[str, Any]:
        """Run the pipeline on one image (or a batch of images)."""
        return super().__call__(images, **kwargs)

    def snake_case_(self, image, prompt=None):
        """Load the image and build model inputs, with model-type-specific prompt handling."""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT expects [CLS] + prompt ids as decoder input.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def snake_case_(self, model_inputs, generate_kwargs=None):
        """Run `generate` on the encoded inputs and return the raw output ids."""
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def snake_case_(self, model_outputs):
        """Decode generated token ids into `{"generated_text": ...}` records."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 2 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 4 ) -> list[list[int]]:
    """Return a row-major n x n matrix filled with 1..n*n; non-positive input
    falls back to size 4 via ``abs(...) or 4``.

    NOTE(review): the original computed the size into `_A` but ranged over the
    raw argument and read an undefined `row_size`; the local is restored.
    """
    row_size = abs(_snake_case) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Rotate 90° counterclockwise: transpose, then reverse the row order.

    NOTE(review): the original called undefined helpers (`reverse_row`,
    `transpose`) — every helper in this file shares one name — so the two
    steps are inlined to keep the block self-contained.
    """
    transposed = [list(column) for column in zip(*_snake_case)]
    return transposed[::-1]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Rotate 180°: reverse each row, then reverse the row order.

    NOTE(review): undefined helpers (`reverse_row`, `reverse_column`) inlined
    to keep the block self-contained.
    """
    flipped_rows = [row[::-1] for row in _snake_case]
    return flipped_rows[::-1]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Rotate 270° counterclockwise: transpose, then reverse each row.

    NOTE(review): undefined helpers (`reverse_column`, `transpose`) inlined to
    keep the block self-contained.
    """
    transposed = [list(column) for column in zip(*_snake_case)]
    return [row[::-1] for row in transposed]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Return the transpose of the matrix as a list of lists.

    NOTE(review): the original built `list(_snake_case)` per column (listifying
    the whole matrix instead of the column) and returned an undefined `matrix`;
    fixed to listify each column tuple.
    """
    return [list(column) for column in zip(*_snake_case)]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Return the matrix with its row order reversed.

    NOTE(review): the original assigned the reversed copy to `_A` and returned
    an undefined `matrix`; it now returns the reversed copy directly.
    """
    return _snake_case[::-1]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> list[list[int]]:
    """Return the matrix with each row reversed (columns mirrored).

    NOTE(review): the original assigned the result to `_A` and returned an
    undefined `matrix`; it now returns the new matrix directly.
    """
    return [row[::-1] for row in _snake_case]
def SCREAMING_SNAKE_CASE_ ( _snake_case :list[list[int]] ) -> None:
    """Print each row on its own line, values space-separated.

    NOTE(review): the original iterated `matrix` (undefined) and printed the
    whole argument per row; it now prints each row of the argument.
    """
    for row in _snake_case:
        print(*row)
if __name__ == "__main__":
    # NOTE(review): `make_matrix`, `print_matrix`, `rotate_aa`/`rotate_aaa` and
    # the `matrix` variable are not defined under these names in this file
    # (every helper above is named SCREAMING_SNAKE_CASE_ and the result is
    # bound to UpperCAmelCase_), so this demo raises NameError as written —
    # looks like mechanical renaming damage; confirm against the upstream
    # rotate-matrix script before running.
    UpperCAmelCase_ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 90 counterclockwise:\n""")
    print_matrix(rotate_aa(matrix))
    UpperCAmelCase_ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 180:\n""")
    print_matrix(rotate_aaa(matrix))
    UpperCAmelCase_ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 270 counterclockwise:\n""")
    print_matrix(rotate_aaa(matrix))
| 2 |
import requests

# NOTE(review): fixed the mangled third-party module name ("bsa" -> "bs4",
# BeautifulSoup's actual distribution module) — the same digit-to-letter
# corruption seen elsewhere in this file.
from bs4 import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
    """Scrape the current price of ticker ``_snake_case`` from Yahoo Finance India.

    NOTE(review): the original fetched the bare symbol instead of the built URL
    and read undefined locals (`soup`, `class_`); restored here. The CSS class
    is tied to Yahoo's current markup and may go stale.
    """
    url = f"https://in.finance.yahoo.com/quote/{_snake_case}?s={_snake_case}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    # The scraper above is named SCREAMING_SNAKE_CASE_ in this file; the
    # original called an undefined `stock_price`.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {SCREAMING_SNAKE_CASE_(symbol):>8}")
| 2 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase_ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( _snake_case :torch.nn.Module , _snake_case :BnbQuantizationConfig , _snake_case :Union[str, os.PathLike] = None , _snake_case :Optional[Dict[str, Union[int, str, torch.device]]] = None , _snake_case :Optional[List[str]] = None , _snake_case :Optional[Dict[Union[int, str], Union[int, str]]] = None , _snake_case :Optional[Union[str, os.PathLike]] = None , _snake_case :bool = False , ) -> Dict:
    """Quantize ``model`` with bitsandbytes 8-bit/4-bit linear layers and dispatch it
    across the available devices (mirrors accelerate's ``load_and_quantize_model``).

    NOTE(review): an automated rewrite collapsed every local to ``_A`` and every
    parameter to ``_snake_case`` — duplicate parameter names are a SyntaxError in
    Python, so this body is NOT runnable as written. Names read later in the body
    (``device_map``, ``weights_location``, ``model_device``, ``load_in_abit`` —
    itself a mangling of the two distinct flags load_in_8bit / load_in_4bit)
    survive from the original and show what each ``_A`` assignment was bound to.
    Restore the original identifiers to repair it.
    """
    _A = bnb_quantization_config.load_in_abit
    _A = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    _A = []
    # custom device map
    if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1:
        _A = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        _A = get_keys_to_not_convert(_snake_case )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(_snake_case )
    _A = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        _A = []
    _A = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(_snake_case )
    # compatibility with peft
    _A = load_in_abit
    _A = load_in_abit
    _A = get_parameter_device(_snake_case )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        _A = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case )
        # convert param to the right dtype
        _A = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    _A = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    _A = getattr(_snake_case , _snake_case , _snake_case )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(_snake_case ):
                param.to(_snake_case )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            '''We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        # Meta-device model: build the bnb skeleton, infer a device map, then
        # stream the checkpoint in and dispatch.
        with init_empty_weights():
            _A = replace_with_bnb_layers(
                _snake_case , _snake_case , modules_to_not_convert=_snake_case )
        _A = get_quantized_model_device_map(
            _snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            _A = True
        _A = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            _snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
    return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Optional[Any] , _snake_case :Tuple=None , _snake_case :Union[str, Any]=None , _snake_case :Tuple=None ) -> Optional[Any]:
    """Build (or validate) a device map for a bnb-quantized model, rejecting maps
    that would place quantized 4-bit modules on CPU/disk.

    NOTE(review): parameters were mangled to five duplicate ``_snake_case`` names
    (a SyntaxError) and locals to ``_A``; the names read later in the body
    (``device_map``, ``special_dtypes``, ``kwargs``, ``max_memory``,
    ``modules_not_to_convert``, ``device_map_without_some_modules``) reveal the
    original bindings. Restore them to make this runnable.
    """
    if device_map is None:
        if torch.cuda.is_available():
            # default: put the whole model on the current GPU
            _A = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(_snake_case , _snake_case ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        # Force skipped modules to the configured dtype and fp32-kept modules to float32.
        _A = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        _A = {}
        _A = special_dtypes
        _A = no_split_module_classes
        _A = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            _A = get_balanced_memory(
                _snake_case , low_zero=(device_map == '''balanced_low_0''') , max_memory=_snake_case , **_snake_case , )
        _A = max_memory
        _A = infer_auto_device_map(_snake_case , **_snake_case )
    if isinstance(_snake_case , _snake_case ):
        # check if don't have any quantized module on the cpu
        _A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        _A = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
    del device_map_without_some_modules
    return device_map
def SCREAMING_SNAKE_CASE_ ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """Replace every eligible ``nn.Linear`` in ``model`` with the matching
    bitsandbytes quantized linear layer, warning if nothing was replaced.

    The original signature repeated ``_snake_case`` four times — a SyntaxError —
    while the body already read ``modules_to_not_convert``; the upstream
    parameter names are restored here (callers pass them positionally or via
    ``modules_to_not_convert=``, both unchanged).
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    # The recursive worker reports whether any layer was actually swapped.
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Any , _snake_case :List[str]=None , _snake_case :Optional[int]=None , ) -> int:
    """Recursive worker: walk ``model.named_children()`` and swap each eligible
    ``nn.Linear`` for a bitsandbytes ``Linear8bitLt``/``Linear4bit`` module,
    returning ``(model, has_been_replaced)``.

    NOTE(review): parameters were mangled to four duplicate ``_snake_case`` names
    (a SyntaxError) and attribute assignments such as ``bnb_module.weight.data = ...``
    were collapsed to ``_A = ...``; names read later (``model``,
    ``current_key_name``, ``modules_to_not_convert``, ``proceed``, ``bnb_module``)
    reveal the original bindings. ``Linearabit``/``LinearabitLt`` are manglings
    of bitsandbytes ``Linear4bit``/``Linear8bitLt``.
    """
    _A = False
    for name, module in model.named_children():
        if current_key_name is None:
            _A = []
        current_key_name.append(_snake_case )
        if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            _A = '''.'''.join(_snake_case )
            _A = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    _A = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    _A = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    _A = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                # Copy the original weight/bias into the quantized replacement.
                _A = module.weight.data
                if module.bias is not None:
                    _A = module.bias.data
                bnb_module.requires_grad_(_snake_case )
                setattr(_snake_case , _snake_case , _snake_case )
                _A = True
        if len(list(module.children() ) ) > 0:
            # Recurse into container modules.
            _A , _A = _replace_with_bnb_layers(
                _snake_case , _snake_case , _snake_case , _snake_case )
            _A = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( model ):
    """Return module names that should stay un-quantized: tied weights plus the
    output head of non-base models (e.g. ``lm_head``), with ``.weight``/``.bias``
    suffixes stripped.

    The original body mixed the mangled parameter ``_snake_case`` with reads of
    the undefined name ``model`` (NameError); the upstream parameter name is
    restored and each throwaway ``_A`` local re-bound to the name the rest of
    the body expects.
    """
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def SCREAMING_SNAKE_CASE_ ( model ) -> bool:
    """Return True if any submodule of ``model`` is a bitsandbytes 4-bit linear.

    The original iterated ``model.modules()`` with the parameter mangled to
    ``_snake_case`` (NameError) and tested ``isinstance(_snake_case, ...)``
    instead of the loop variable ``m``. ``bnb.nn.Linearabit`` was a mangling of
    upstream accelerate's ``bnb.nn.Linear4bit``; both are restored here.
    """
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def SCREAMING_SNAKE_CASE_ ( parameter: nn.Module ) -> torch.device:
    """Return the device of the first parameter of ``parameter``.

    Assumes all parameters live on one device — TODO confirm for sharded models.
    The original body read the undefined name ``parameter`` (the argument had
    been renamed to ``_snake_case``) and mis-annotated the return as ``int``.
    """
    return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :Optional[Any] , _snake_case :Optional[int] , _snake_case :Optional[int] , _snake_case :List[str] , _snake_case :Dict , _snake_case :Optional[int] ) -> Dict:
    """Quantize one parameter on device 0 (if no fp16 statistics were given),
    offload its weight — and its ``SCB`` int8 scale stats when present — to
    disk, then replace the in-model tensor with an empty meta tensor.

    NOTE(review): the seven parameters were all mangled to ``_snake_case``
    (duplicate parameter names are a SyntaxError), so this is not runnable as
    written; names read in the body (``fpaa_statistics`` — i.e. fp16_statistics —
    ``param_name``, ``model``, ``tensor_name``, ``splits``, ``module``,
    ``new_module``, ``param``) reveal the original bindings.
    """
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case )
        _A = param_name
        _A = model
        # Resolve the dotted parameter name down to its owning submodule.
        if "." in tensor_name:
            _A = tensor_name.split('''.''' )
            for split in splits[:-1]:
                _A = getattr(_snake_case , _snake_case )
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''' )
                _A = new_module
            _A = splits[-1]
        # offload weights
        _A = False
        offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , _snake_case , index=_snake_case , )
    else:
        # Pre-quantized path: offload the given tensor and its scale stats directly.
        offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case )
        offload_weight(_snake_case , param_name.replace('''weight''' , '''SCB''' ) , _snake_case , index=_snake_case )
    # Free the real storage: leave only an empty meta tensor in the model.
    set_module_tensor_to_device(_snake_case , _snake_case , '''meta''' , dtype=_snake_case , value=torch.empty(*param.size() ) )
| 2 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Self-test: kruskal on a 9-node graph must produce the known MST.

    The original bound every value to the throwaway ``_A`` and then called
    ``kruskal(_snake_case, _snake_case)`` with ``_snake_case`` undefined
    (NameError); named locals are restored here.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
| 2 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE_ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text ``line`` to at most ``max_length`` tokens, padding
    on ``padding_side`` and returning framework tensors.

    The original signature repeated ``_snake_case`` (a SyntaxError) while the
    body read ``line``, ``padding_side`` and ``pad_to_max_length``; the upstream
    parameter names are restored, as is the ``tokenizer.padding_side``
    attribute assignment that had been collapsed to a throwaway local.
    """
    # BART tokenizers need add_prefix_space when the line does not start with a space.
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )


# Readable alias: Seq2SeqDataset.__getitem__ below calls ``encode_line``.
encode_line = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop token columns that are padding in every row of the batch.

    The original signature repeated ``_snake_case`` (a SyntaxError) while the
    body read ``input_ids`` and ``attention_mask``; the upstream parameter
    names are restored.
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


# Readable alias: Seq2SeqDataset.collate_fn below calls ``trim_batch``.
trim_batch = SCREAMING_SNAKE_CASE_
class lowerCamelCase__ ( _A):
    """Line-by-line seq2seq dataset: reads parallel ``<type_path>.source`` /
    ``<type_path>.target`` files via ``linecache`` and tokenizes one pair per
    index.

    NOTE(review): an automated rewrite renamed parameters to the duplicated
    ``__lowerCAmelCase`` (a SyntaxError in ``__init__``/``collate_fn``) and
    collapsed the ``self.<attr> = ...`` assignments to ``_A = ...``; the
    attribute names read later (``self.src_file``, ``self.max_source_length``,
    ``self.tokenizer``, ``self.prefix``, ...) reveal the original bindings.
    """

    def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict="train" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any="" , ) -> Optional[Any]:
        super().__init__()
        # Parallel source/target text files for this split.
        _A = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
        _A = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
        _A = self.get_char_lens(self.src_file )
        _A = max_source_length
        _A = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        _A = tokenizer
        _A = prefix
        if n_obs is not None:
            # Optionally cap the dataset to the first n_obs examples.
            _A = self.src_lens[:n_obs]
        _A = src_lang
        _A = tgt_lang

    def __len__( self : Union[str, Any] ) -> List[Any]:
        # Number of usable examples (one per source line, possibly capped).
        return len(self.src_lens )

    def __getitem__( self : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Dict[str, torch.Tensor]:
        _A = index + 1  # linecache starts at 1
        _A = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        _A = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        _A = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        _A = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        _A = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
        _A = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
        _A = source_inputs['''input_ids'''].squeeze()
        _A = target_inputs['''input_ids'''].squeeze()
        _A = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def snake_case_ ( __lowerCAmelCase : int ) -> List[Any]:
        # Character length of every line in the file (used by the min-length assert).
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> Dict[str, torch.Tensor]:
        # Collate: stack per-example tensors, then trim all-padding columns.
        _A = torch.stack([x['''input_ids'''] for x in batch] )
        _A = torch.stack([x['''attention_mask'''] for x in batch] )
        _A = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        _A = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _A = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _A = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        _A , _A = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        _A = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
UpperCAmelCase_ = getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[List] ) -> List[str]:
    """Concatenate a list of lists into one flat list, preserving order."""
    flattened = []
    for sublist in _snake_case:
        flattened.extend(sublist )
    return flattened
def SCREAMING_SNAKE_CASE_ ( folder_path: str ) -> None:
    """Write git metadata for the current repo into ``folder_path/git_log.json``.

    The original collected the repo info into a throwaway local and then passed
    the *folder path* as the JSON payload; the info dict is now saved instead.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def SCREAMING_SNAKE_CASE_ ( content , path , indent=4 , **json_dump_kwargs ):
    """Serialize ``content`` as pretty-printed JSON to ``path``.

    The original signature repeated ``_snake_case`` three times (a SyntaxError)
    and dumped the wrong objects; the upstream parameter names and the
    ``json.dump(content, f, ...)`` call are restored.
    """
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def SCREAMING_SNAKE_CASE_ ( path ):
    """Load and return the JSON document stored at ``path``.

    The original called ``json.load(path)`` — passing the path string instead
    of the open file handle — which raises AttributeError; ``json.load(f)``
    is restored.
    """
    with open(path ) as f:
        return json.load(f )
def SCREAMING_SNAKE_CASE_ ( ):
    """Return id/sha/branch/hostname info for the enclosing git repository.

    The original passed the undefined name ``_snake_case`` both as
    ``search_parent_directories`` and as the repo-id source (NameError);
    upstream passes ``True`` and stringifies the ``repo`` object.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def SCREAMING_SNAKE_CASE_ ( f: Callable , x: Iterable ) -> List:
    """Eagerly map ``f`` over ``x`` and return the results as a list.

    The original signature repeated ``_snake_case`` for both parameters —
    a SyntaxError — so distinct names are restored.
    """
    return list(map(f , x ) )
def SCREAMING_SNAKE_CASE_ ( obj , path ):
    """Pickle ``obj`` to the file at ``path``.

    The original signature repeated ``_snake_case`` (a SyntaxError) and dumped
    into the path value rather than the open handle; ``pickle.dump(obj, f)``
    is restored.
    """
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def SCREAMING_SNAKE_CASE_ ( s ):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    English articles, and collapse whitespace.

    The inner helpers originally took a parameter named ``_snake_case`` while
    their bodies read the undefined name ``text`` (NameError); the upstream
    parameter names are restored.
    """

    def remove_articles(text ):
        return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def SCREAMING_SNAKE_CASE_ ( prediction , ground_truth ):
    """Token-level F1 between a prediction and a reference, after SQuAD-style
    normalization. Returns 0 when the token multisets are disjoint.

    The original signature repeated ``_snake_case`` (a SyntaxError); upstream
    parameter names and per-step locals are restored.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def SCREAMING_SNAKE_CASE_ ( prediction , ground_truth ) -> bool:
    """True when the two strings are identical after SQuAD-style normalization.

    The original signature repeated ``_snake_case`` (a SyntaxError); upstream
    parameter names are restored.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def SCREAMING_SNAKE_CASE_ ( output_lns , reference_lns ):
    """Mean exact-match score between two equal-length lists of strings,
    returned as ``{"em": float}``.

    The original signature repeated ``_snake_case`` (a SyntaxError); upstream
    parameter names are restored.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        # Average over the batch; an empty batch returns em == 0.
        em /= len(output_lns )
    return {"em": em}
def SCREAMING_SNAKE_CASE_ ( model_prefix: str ) -> bool:
    """True when ``model_prefix`` denotes a RAG model (starts with ``"rag"``).

    The original body read the undefined name ``model_prefix`` (the parameter
    had been renamed to ``_snake_case``) and mis-annotated the return as str.
    """
    return model_prefix.startswith('''rag''' )
def SCREAMING_SNAKE_CASE_ ( extra_params , hparams , config ):
    """Move each truthy attribute in ``extra_params`` from ``hparams`` onto
    ``config``, translating ``dropout`` to T5's ``dropout_rate`` when needed.

    The original signature repeated ``_snake_case`` (a SyntaxError) and lost
    the ``equivalent_param["dropout"]`` key in the mangling; upstream names
    are restored. Returns the mutated ``(hparams, config)`` pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 2 |
def SCREAMING_SNAKE_CASE_ ( input_num: int ) -> int:
    """Return the sum of all proper divisors of a positive integer.

    The original validated with ``isinstance(_snake_case, _snake_case)`` —
    checking a value against itself, which raises TypeError — and its body
    read the undefined name ``input_num``; the check against ``int`` and the
    parameter name are restored.

    Raises:
        ValueError: if the input is not an integer or is not positive.
    """
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 2 | 1 |
import math
# Project Euler 493 setup: 70 balls, 7 colours, 10 balls per colour.
# The original bound all three constants to the same name ``UpperCAmelCase_``
# while the function below reads NUM_BALLS / BALLS_PER_COLOUR / NUM_COLOURS.
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def SCREAMING_SNAKE_CASE_ ( num_picked: int = 20 ) -> str:
    """Expected number of distinct colours among ``num_picked`` balls drawn
    from the urn, formatted to 9 decimal places.

    The original computed ``math.comb(_snake_case, _snake_case)`` — i.e.
    comb(n, n) == 1 — instead of comb(NUM_BALLS, num_picked).
    """
    total = math.comb(NUM_BALLS , num_picked )
    # Ways to pick with one particular colour entirely missing.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
    # The solver above is named SCREAMING_SNAKE_CASE_ in this file; the
    # original called the undefined name ``solution``.
    print(SCREAMING_SNAKE_CASE_(2_0))
| 2 |
# The matcher below reads ``alphabet_size`` and ``modulus``; the original
# bound both constants to the same name ``UpperCAmelCase_``.
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def SCREAMING_SNAKE_CASE_ ( pattern: str , text: str ) -> bool:
    """Rabin-Karp rolling-hash substring search: True iff ``pattern`` occurs
    in ``text``.

    The original signature repeated ``_snake_case`` (a SyntaxError) and
    collapsed the hash locals to ``_A``; upstream names are restored.
    """
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        # Hash equality is a candidate; confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False


# Readable alias: the self-test below calls ``rabin_karp``.
rabin_karp = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Exercise ``rabin_karp`` against known positive and negative cases.

    The original bound every input to the throwaway ``_A`` and then called
    ``rabin_karp(_snake_case, _snake_case)`` with ``_snake_case`` undefined
    (NameError); named locals are restored.
    """
    # Test 1)
    pattern = '''abc1abc12'''
    text_a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text_b = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )
if __name__ == "__main__":
    # The original called the undefined name ``test_rabin_karp``; at this point
    # in the module SCREAMING_SNAKE_CASE_ is bound to the self-test defined
    # immediately above.
    SCREAMING_SNAKE_CASE_()
| 2 | 1 |
import argparse
import os
import re
import packaging.version
# Release-tooling constants. The original bound all four to the same name
# ``UpperCAmelCase_`` while the helpers below read REPLACE_PATTERNS /
# REPLACE_FILES / README_FILE / the examples path.
PATH_TO_EXAMPLES = """examples/"""
REPLACE_PATTERNS = {
    """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
    """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""


def SCREAMING_SNAKE_CASE_ ( fname , version , pattern ):
    """Rewrite the version string inside ``fname`` using the regex/template
    pair registered under ``REPLACE_PATTERNS[pattern]``.

    The original signature repeated ``_snake_case`` (a SyntaxError); the
    upstream parameter names — which the body already read — are restored.
    """
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )


# Readable alias: the helpers below call ``update_version_in_file``.
update_version_in_file = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( version ):
    """Apply the version bump to every ``.py`` example under the examples tree.

    The original walked ``_snake_case`` — the version string itself — instead
    of the examples directory, and passed the undefined version on; both are
    restored per the upstream release script.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )


update_version_in_examples = SCREAMING_SNAKE_CASE_  # readable alias
def SCREAMING_SNAKE_CASE_ ( version , patch=False ):
    """Bump the version in the canonical files, and in the example scripts
    unless this is a patch release.

    The original signature repeated ``_snake_case`` (a SyntaxError) and passed
    the mangled names through; upstream parameter names are restored.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )


global_version_update = SCREAMING_SNAKE_CASE_  # readable alias
def SCREAMING_SNAKE_CASE_ ( ):
    """Rewrite the README model list so `main` doc links point at the stable docs.

    The original bound the prompts to the throwaway ``_A`` and then read the
    undefined names ``_start_prompt`` and ``README_FILE`` (NameError); the
    upstream locals are restored.
    """
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )


clean_main_ref_in_model_list = SCREAMING_SNAKE_CASE_  # readable alias
def SCREAMING_SNAKE_CASE_ ( ):
    """Read the current package version out of the ``init`` file and parse it.

    The original searched the undefined name ``_snake_case`` (NameError)
    instead of the file contents; the upstream locals are restored.
    """
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )


get_version = SCREAMING_SNAKE_CASE_  # readable alias
def SCREAMING_SNAKE_CASE_ ( patch=False ):
    """Interactive pre-release step: compute the release version, confirm it
    with the user, and apply it everywhere.

    The original read the undefined names ``patch``, ``default_version`` and
    ``version`` (all assignments had been collapsed to ``_A``); the upstream
    locals are restored.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()


pre_release_work = SCREAMING_SNAKE_CASE_  # readable alias
def SCREAMING_SNAKE_CASE_ ( ):
    """Interactive post-release step: confirm and apply the next ``.dev0``
    development version.

    The original read the undefined names ``current_version``, ``dev_version``
    and ``version``; the upstream locals are restored.
    """
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()


post_release_work = SCREAMING_SNAKE_CASE_  # readable alias
if __name__ == "__main__":
    # The original assigned the parser to ``UpperCAmelCase_`` and then read the
    # undefined names ``parser`` and ``args``; the bindings are restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
| 2 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module constants for the Speech2Text2 tokenizer. The original bound every
# one of these (and the logger) to the same name ``UpperCAmelCase_``, while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# BPE_TOKEN_MERGES, BPE_TOKEN_VOCAB and ``logger``.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """tokenizer_config_file""": """tokenizer_config.json""",
    """merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
        ),
    },
    """tokenizer_config_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
        ),
    },
    """merges_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
        ),
    },
}
BPE_TOKEN_MERGES = """</w>"""
BPE_TOKEN_VOCAB = """@@ """
def SCREAMING_SNAKE_CASE_ ( word ):
    """Return the set of adjacent symbol pairs in ``word`` (a string or a tuple
    of symbols), as used by the BPE merge loop.

    The original body read the undefined name ``word`` (the parameter had been
    renamed to ``_snake_case``); the parameter name is restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# (bound under the name the tokenizer class reads; the original assigned it
# to ``UpperCAmelCase_``)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """BPE tokenizer for Speech2Text2 (decoder-only use when no merges file is
    given). Loads a JSON vocab and an optional ``merges.txt``; tokens are
    merged with ``</w>`` word-end markers and rendered with ``@@ `` continuations.

    NOTE(review): an automated rewrite duplicated the ``__lowerCAmelCase``
    parameter names (a SyntaxError in several methods) and collapsed
    ``self.<attr> = ...`` assignments to ``_A = ...``; attribute reads such as
    ``self.encoder``, ``self.bpe_ranks`` and ``self.cache`` reveal the
    original bindings.
    """

    a__ : Dict = VOCAB_FILES_NAMES
    a__ : str = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[Any] = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
        super().__init__(
            unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
        _A = do_lower_case
        with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            _A = json.load(__lowerCAmelCase )
        # decoder is the id -> token inverse of the encoder mapping.
        _A = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Without merges the tokenizer can decode ids but cannot encode text.
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            _A = None
            _A = None
        else:
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
                _A = merges_handle.read().split('''\n''' )[:-1]
            _A = [tuple(merge.split()[:2] ) for merge in merges]
            # bpe_ranks: merge pair -> priority (lower rank merges first).
            _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
            _A = {}

    @property
    def snake_case_ ( self : List[str] ) -> int:
        # Vocabulary size, derived from the id -> token table.
        return len(self.decoder )

    def snake_case_ ( self : Dict ) -> Dict:
        # Full vocab including tokens added after loading.
        return dict(self.encoder , **self.added_tokens_encoder )

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
        # Apply BPE to a single token, caching results.
        _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        _A = get_pairs(__lowerCAmelCase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked adjacent pair until none is mergeable.
            _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _A , _A = bigram
            _A = []
            _A = 0
            while i < len(__lowerCAmelCase ):
                try:
                    _A = word.index(__lowerCAmelCase , __lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _A = j
                if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _A = tuple(__lowerCAmelCase )
            _A = new_word
            if len(__lowerCAmelCase ) == 1:
                break
            else:
                _A = get_pairs(__lowerCAmelCase )
        _A = ''' '''.join(__lowerCAmelCase )
        if word == "\n " + BPE_TOKEN_MERGES:
            _A = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(__lowerCAmelCase ):
            _A = word.replace(__lowerCAmelCase , '''''' )
        _A = word.replace(''' ''' , __lowerCAmelCase )
        _A = word
        return word

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
        # Split text on whitespace and BPE-encode each token; requires merges.
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            _A = text.lower()
        _A = text.split()
        _A = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
        return split_tokens

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
        # token -> id, falling back to the unk token's id.
        return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )

    def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
        # id -> token, falling back to the unk token.
        _A = self.decoder.get(__lowerCAmelCase , self.unk_token )
        return result

    def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
        _A = ''' '''.join(__lowerCAmelCase )
        # make sure @@ tokens are concatenated
        _A = ''''''.join(string.split(__lowerCAmelCase ) )
        return string

    def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        # Write vocab.json (and merges.txt when available) into save_directory.
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
        _A = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            # NOTE(review): upstream sorts with ``key=lambda kv: kv[1]``; the
            # mangled lambda below reads the undefined name ``kv``.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    _A = token_index
                writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 2 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[List, PIL.Image.Image, torch.Tensor] ) -> List[Any]:
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _snake_case , )
if isinstance(_snake_case , torch.Tensor ):
return image
elif isinstance(_snake_case , PIL.Image.Image ):
_A = [image]
if isinstance(image[0] , PIL.Image.Image ):
_A , _A = image[0].size
_A , _A = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_A = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
_A = np.concatenate(_snake_case , axis=0 )
_A = np.array(_snake_case ).astype(np.floataa ) / 255.0
_A = image.transpose(0 , 3 , 1 , 2 )
_A = 2.0 * image - 1.0
_A = torch.from_numpy(_snake_case )
elif isinstance(image[0] , torch.Tensor ):
_A = torch.cat(_snake_case , dim=0 )
return image
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[Any]:
if isinstance(_snake_case , torch.Tensor ):
return mask
elif isinstance(_snake_case , PIL.Image.Image ):
_A = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_A , _A = mask[0].size
_A , _A = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
_A = np.concatenate(_snake_case , axis=0 )
_A = mask.astype(np.floataa ) / 255.0
_A = 0
_A = 1
_A = torch.from_numpy(_snake_case )
elif isinstance(mask[0] , torch.Tensor ):
_A = torch.cat(_snake_case , dim=0 )
return mask
class lowerCamelCase__ ( DiffusionPipeline):
    """Inpainting pipeline implementing RePaint (https://arxiv.org/abs/2201.09865).

    Fixes: the base class was garbled to the undefined name `_A` (restored to
    `DiffusionPipeline`, imported above); `__init__` and `__call__` had
    duplicated `__lowerCAmelCase` parameter names (a SyntaxError) while their
    bodies read `original_image`, `mask_image`, `batch_size`, `eta`, `t_last`,
    `generator`, `output_type` and `return_dict` — parameters restored to those
    names, and the garbled `_A` assignment targets restored from the reads.
    """
    a__ : UNetaDModel
    a__ : RePaintScheduler

    def __init__( self : Union[str, Any] , unet : List[Any] , scheduler : Any ) -> Optional[Any]:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : Optional[int] , image : "Union[torch.Tensor, PIL.Image.Image]" , mask_image : "Union[torch.Tensor, PIL.Image.Image]" , num_inference_steps : int = 2_50 , eta : float = 0.0 , jump_length : int = 10 , jump_n_sample : int = 10 , generator : "Optional[Union[torch.Generator, List[torch.Generator]]]" = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> "Union[ImagePipelineOutput, Tuple]":
        """Run RePaint inpainting: keep `mask_image`==1 regions of `image`
        and resample the masked-out regions with the diffusion model."""
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values (RePaint scheduler takes jump parameters too)
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t (RePaint "jump back")
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar

# Fix: the type variable was assigned to `UpperCAmelCase_` while every generic
# below still refers to `T`; keep the old name as an alias.
T = TypeVar('''T''')
UpperCAmelCase_ = T


def get_parent_position ( position :int ) -> int:
    """Heap index of the parent of the node at `position`."""
    return (position - 1) // 2


def get_child_left_position ( position :int ) -> int:
    """Heap index of the left child of the node at `position`."""
    return (2 * position) + 1


def get_child_right_position ( position :int ) -> int:
    """Heap index of the right child of the node at `position`."""
    return (2 * position) + 2


class lowerCamelCase__ ( Generic[T]):
    """Min-oriented binary-heap priority queue with a position map for O(log n)
    `update_key`.

    Fixes: every method was defined under the single name `snake_case_` (each
    definition overriding the last) while the bodies and the Prim's-algorithm
    function below call `push`/`extract_min`/`update_key`/`is_empty` and the
    private `_bubble_up`/`_bubble_down`/`_swap_nodes`; duplicated parameter
    names (a SyntaxError) and the lost `self.*`/`position_map` assignments were
    restored from the attribute reads.
    """

    def __init__( self ) -> None:
        # heap: list of (element, weight) pairs; position_map: element -> index
        self.heap = []
        self.position_map = {}
        self.elements = 0

    def __len__( self ) -> int:
        return self.elements

    def __repr__( self ) -> str:
        return str(self.heap )

    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push( self , elem : T , weight : int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )

    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem

    def update_key( self , elem : T , weight : int ) -> None:
        # Update the weight of the given key and restore the heap invariant
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )

    def _bubble_up( self , elem : T ) -> None:
        # Place a node at the proper position (upward movement) [internal]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None

    def _bubble_down( self , elem : T ) -> None:
        # Place a node at the proper position (downward movement) [internal]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None

    def _swap_nodes( self , nodea_pos : int , nodeb_pos : int ) -> None:
        # Swap the nodes at the given positions and fix the position map
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos


# Backward-compatible alias used by the Prim's-algorithm function below.
MinPriorityQueue = lowerCamelCase__
T = TypeVar('''T''')  # fix: restore the type variable consumed by Generic[T] below


class lowerCamelCase__ ( Generic[T]):
    """Undirected weighted graph stored as an adjacency map
    (node -> {neighbour: weight}).

    Fixes: both methods were defined under the name `snake_case_` while the
    body calls `self.add_node`; `add_edge` had duplicated parameter names
    (a SyntaxError); the lost `self.connections`/adjacency assignments were
    restored from the attribute reads.
    """

    def __init__( self ) -> None:
        self.connections = {}
        self.nodes = 0

    def __repr__( self ) -> str:
        return str(self.connections )

    def __len__( self ) -> int:
        return self.nodes

    def add_node( self , node : T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge( self , nodea : T , nodeb : T , weight : int ) -> None:
        # Add an (undirected) edge between 2 nodes in the graph
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight


# Backward-compatible alias used by the Prim's-algorithm function below.
GraphUndirectedWeighted = lowerCamelCase__
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's minimum-spanning-tree algorithm over an undirected weighted graph.

    Returns `(dist, parent)` where `parent` encodes the spanning tree.
    Fixes: the garbled version collapsed `dist[node]`, `dist[neighbour]` and
    `parent[neighbour]` assignments into a throwaway `_A`, so neither map was
    ever updated; the targets are restored from the surrounding reads.
    """
    dist = {node: maxsize for node in _snake_case.connections}
    parent = {node: None for node in _snake_case.connections}
    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization: pull the first node and relax its neighbours
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in _snake_case.connections[node]:
        if dist[neighbour] > dist[node] + _snake_case.connections[node][neighbour]:
            dist[neighbour] = dist[node] + _snake_case.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in _snake_case.connections[node]:
            if dist[neighbour] > dist[node] + _snake_case.connections[node][neighbour]:
                dist[neighbour] = dist[node] + _snake_case.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
| 2 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase_ = {
"""facebook/nllb-large-en-ro""": 1_0_2_4,
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
UpperCAmelCase_ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", 
"""mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowerCamelCase__ ( _A):
    """Fast (tokenizers-backed) NLLB tokenizer with language-code handling.

    NOTE(review): this class is heavily garbled — the base class `_A` is
    undefined at module level (presumably the fast-tokenizer base imported
    above); every class attribute is assigned to the same name `a__` (only the
    last survives); `__init__` and several methods duplicate the parameter
    name `__lowerCAmelCase` (a SyntaxError as written); many `_A = ...`
    assignments lost their original targets while later code reads the
    original names (`_additional_special_tokens`, `inputs`, `prefix_tokens_str`,
    ...); and `@src_lang.setter` refers to a property that is no longer named
    `src_lang`. Confirm against the upstream fast tokenizer before repairing.
    """
    a__ : Tuple = VOCAB_FILES_NAMES
    a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    a__ : Union[str, Any] = ["input_ids", "attention_mask"]
    a__ : List[Any] = NllbTokenizer
    a__ : List[int] = []
    a__ : List[int] = []
    # Build the tokenizer, register the NLLB language codes as special tokens
    # and initialise the source/target language state.
    def __init__( self : List[str] , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[int]="<s>" , __lowerCAmelCase : Optional[int]="<unk>" , __lowerCAmelCase : Dict="<pad>" , __lowerCAmelCase : List[str]="<mask>" , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Union[str, Any]=False , **__lowerCAmelCase : int , ) -> Dict:
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = legacy_behaviour
        super().__init__(
            vocab_file=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , legacy_behaviour=__lowerCAmelCase , **__lowerCAmelCase , )
        _A = vocab_file
        # a fast tokenizer can save the slow vocab only when a vocab file exists
        _A = False if not self.vocab_file else True
        _A = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
            self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        # map each language code to its token id
        _A = {
            lang_code: self.convert_tokens_to_ids(__lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        _A = src_lang if src_lang is not None else '''eng_Latn'''
        _A = self.convert_tokens_to_ids(self._src_lang )
        _A = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    # Current source language code (e.g. "eng_Latn").
    @property
    def snake_case_ ( self : Any ) -> str:
        return self._src_lang
    # Setter: switching the source language re-derives the special tokens.
    @src_lang.setter
    def snake_case_ ( self : int , __lowerCAmelCase : str ) -> None:
        _A = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    # Wrap a (single or pair) sequence with the language prefix/suffix tokens.
    def snake_case_ ( self : str , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    # Token-type ids: NLLB uses all zeros, even for pairs.
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Encode raw text for translation, recording the forced BOS target id.
    # NOTE(review): `inputs` is undefined as written — the encoded batch and
    # the `forced_bos_token_id` assignment were both garbled to `_A`.
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Dict ) -> Optional[int]:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        _A = src_lang
        _A = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        _A = self.convert_tokens_to_ids(__lowerCAmelCase )
        _A = tgt_lang_id
        return inputs
    # Seq2seq batch preparation with default eng_Latn -> fra_Latn languages.
    def snake_case_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "eng_Latn" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "fra_Latn" , **__lowerCAmelCase : str , ) -> BatchEncoding:
        _A = src_lang
        _A = tgt_lang
        return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
    # Switch into source-language mode (used by the translation context manager).
    def snake_case_ ( self : Dict ) -> Dict:
        return self.set_src_lang_special_tokens(self.src_lang )
    # Switch into target-language mode.
    def snake_case_ ( self : Any ) -> Dict:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    # Reset prefix/suffix special tokens for the given *source* language and
    # rebuild the backend post-processor accordingly.
    def snake_case_ ( self : str , __lowerCAmelCase : Dict ) -> None:
        _A = self.convert_tokens_to_ids(__lowerCAmelCase )
        if self.legacy_behaviour:
            # legacy: no prefix, language code after EOS
            _A = []
            _A = [self.eos_token_id, self.cur_lang_code]
        else:
            _A = [self.cur_lang_code]
            _A = [self.eos_token_id]
        _A = self.convert_ids_to_tokens(self.prefix_tokens )
        _A = self.convert_ids_to_tokens(self.suffix_tokens )
        _A = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    # Same as above, but for the *target* language.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> None:
        _A = self.convert_tokens_to_ids(__lowerCAmelCase )
        if self.legacy_behaviour:
            _A = []
            _A = [self.eos_token_id, self.cur_lang_code]
        else:
            _A = [self.cur_lang_code]
            _A = [self.eos_token_id]
        _A = self.convert_ids_to_tokens(self.prefix_tokens )
        _A = self.convert_ids_to_tokens(self.suffix_tokens )
        _A = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    # Copy the sentencepiece model next to the fast tokenizer files.
    def snake_case_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        return (out_vocab_file,)
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """SentencePiece-based BartPho tokenizer with a reduced fairseq vocabulary.

    NOTE(review): heavily garbled — the base class `_A` is undefined at module
    level; several signatures duplicate the parameter name `__lowerCAmelCase`
    (a SyntaxError as written); many `_A = ...` assignments lost their targets
    while later code reads the originals (`mask_token`, `sp_model_kwargs`,
    `self.fairseq_tokens_to_ids`, `cnt`, `state`, `d`, `string`, ...).
    Confirm against the upstream slow tokenizer before repairing.
    """
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Tuple = ["input_ids", "attention_mask"]
    # Load the sentencepiece model and build the reduced monolingual vocab.
    def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        _A = vocab_file
        _A = monolingual_vocab_file
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _A = {}
        _A = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                _A = cnt
                cnt += 1
        # each line of dict.txt: "<token> <count>" -- only the token is kept
        with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                _A = line.strip().split()[0]
                _A = len(self.fairseq_tokens_to_ids )
                if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                    _A = len(self.fairseq_tokens_to_ids )
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    # Pickle support: the C++ sentencepiece processor is not picklable, so it
    # is dropped and its serialized proto is kept instead.
    def __getstate__( self : Any ) -> List[Any]:
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        _A = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _A = {}
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    # <s> A </s> (pair: <s> A </s></s> B </s>)
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # 1 for special tokens, 0 for sequence tokens.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
    # Token-type ids are all zeros for this model, even for pairs.
    def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Vocabulary size = size of the reduced fairseq mapping.
    @property
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        return len(self.fairseq_ids_to_tokens )
    # Full vocab dict including added tokens.
    def snake_case_ ( self : Dict ) -> Optional[Any]:
        _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Raw sentencepiece segmentation.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
    # Token -> id through the reduced fairseq vocab, unk otherwise.
    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    # Id -> token through the reduced fairseq vocab.
    def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
        return self.fairseq_ids_to_tokens[index]
    # Join sub-tokens; NOTE(review): the `replace` arguments are garbled —
    # presumably this replaced the sentencepiece underline with a space.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string
    # Copy (or re-serialize) the sentencepiece model and the monolingual vocab.
    def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__lowerCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 2 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class lowerCamelCase__ ( _A):
    """Output container returned by the UnCLIP scheduler's step function.

    NOTE(review): the base class `_A` is undefined at module level (the import
    above suggests `BaseOutput`), and both fields were garbled to the single
    name `a__` — as written, only the second (optional) field survives.
    """
    # prev_sample tensor (garbled field name)
    a__ : torch.FloatTensor
    # pred_original_sample tensor, optional (garbled field name)
    a__ : Optional[torch.FloatTensor] = None
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :List[Any]=0.999 , _snake_case :Optional[int]="cosine" , ) -> Dict:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_snake_case :Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_snake_case :str ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_A = []
for i in range(_snake_case ):
_A = i / num_diffusion_timesteps
_A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) )
return torch.tensor(_snake_case , dtype=torch.floataa )
class lowerCamelCase__ ( _A , _A):
"""simple docstring"""
    @register_to_config
    # NOTE(review): all six parameters are named `__lowerCAmelCase` — a
    # SyntaxError as written — and every `_A = ...` below lost its intended
    # `self.` target (the reads elsewhere suggest `self.betas`, `self.alphas`,
    # `self.alphas_cumprod`, `self.one`, `self.init_noise_sigma`,
    # `self.num_inference_steps`, `self.timesteps`, `self.variance_type`).
    # Confirm against the upstream scheduler before repairing.
    def __init__( self : Optional[Any] , __lowerCAmelCase : int = 10_00 , __lowerCAmelCase : str = "fixed_small_log" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[float] = 1.0 , __lowerCAmelCase : str = "epsilon" , __lowerCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]:
        # UnCLIP only supports the squared-cosine beta schedule
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
        _A = betas_for_alpha_bar(__lowerCAmelCase )
        _A = 1.0 - self.betas
        _A = torch.cumprod(self.alphas , dim=0 )
        _A = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        _A = 1.0
        # setable values
        _A = None
        _A = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() )
        _A = variance_type
def snake_case_ ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
return sample
def snake_case_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, torch.device] = None ) -> Optional[Any]:
_A = num_inference_steps
_A = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_A = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_A = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Tuple=None ) -> List[str]:
if prev_timestep is None:
_A = t - 1
_A = self.alphas_cumprod[t]
_A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_A = 1 - alpha_prod_t
_A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_A = self.betas[t]
else:
_A = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_A = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_A = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) )
_A = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_A = variance.log()
_A = beta.log()
_A = (predicted_variance + 1) / 2
_A = frac * max_log + (1 - frac) * min_log
return variance
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
_A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_A , _A = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
_A = None
# 1. compute alphas, betas
if prev_timestep is None:
_A = t - 1
_A = self.alphas_cumprod[t]
_A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_A = 1 - alpha_prod_t
_A = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_A = self.betas[t]
_A = self.alphas[t]
else:
_A = 1 - alpha_prod_t / alpha_prod_t_prev
_A = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_A = torch.clamp(
__lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_A = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_A = 0
if t > 0:
_A = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device )
_A = self._get_variance(
__lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
_A = variance
elif self.variance_type == "learned_range":
_A = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
_A = variance * variance_noise
_A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def snake_case_ ( self : List[str] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
_A = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
_A = timesteps.to(original_samples.device )
_A = alphas_cumprod[timesteps] ** 0.5
_A = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_A = sqrt_alpha_prod.unsqueeze(-1 )
_A = (1 - alphas_cumprod[timesteps]) ** 0.5
_A = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_A = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_A = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 2 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search over an adjacency-list graph.

    Args:
        graph: mapping from vertex to its list of adjacent vertices.
        start: vertex to start the traversal from.

    Returns:
        The set of all vertices reachable from ``start`` (including ``start``).
    """
    # Fix: the obfuscated original collapsed `explored, stack` into one name and
    # pushed the wrong variable; the name `depth_first_search` is what the
    # module's __main__ demo calls.
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Adjacency list of the example graph; the __main__ demo below references it as `G`
# (the obfuscated original assigned it to a different name, breaking the demo).
G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure (standard transformers pattern).  Fixes in this block:
# the dict must be named `_import_structure` (the `_LazyModule` call below uses
# that name), the torch-only symbols must be registered under the
# "modeling_instructblip" key, and the `_LazyModule` must replace this module
# in `sys.modules`.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure (standard transformers pattern).  Fixes in this block:
# the dict must be named `_import_structure` (the `_LazyModule` call below uses
# that name), the torch-only symbols must be registered under the
# "modeling_biogpt" key, and the `_LazyModule` must replace this module in
# `sys.modules`.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the config class's `max_position_embeddings` property calls
# `logger.info`, so this must be bound to the name `logger` (the obfuscated
# original assigned it, and the URL map, to the same throwaway name).
logger = logging.get_logger(__name__)

# Map of shortcut model names to config URLs on the Hugging Face Hub.
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowerCamelCase__(PretrainedConfig):
    """Configuration class for an XLNet-style model.

    Fixes relative to the obfuscated original: the base class is the imported
    ``PretrainedConfig`` (the previous base name was undefined), parameters have
    distinct names (duplicates are a SyntaxError), values are stored on ``self``
    so the config actually carries them, and the ``max_position_embeddings``
    property/setter pair is well-formed (the original decorated a setter with an
    undefined name).
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=3_20_00,
        d_model=10_24,
        n_layer=24,
        n_head=16,
        d_inner=40_96,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        dropout=0.1,
        mem_len=5_12,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct the configuration; unknown kwargs are forwarded to the base class."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # the per-head size must divide the model size evenly
        if d_model % n_head != 0:
            raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''')
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''')
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            # deprecated alias for use_mems_eval
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''', FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """XLNet-style models have no fixed sequence-length limit; -1 signals that."""
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 2 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and Hub config-URL map; the obfuscated original assigned both
# to the same throwaway name, so the logger binding was immediately clobbered.
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class lowerCamelCase__(PretrainedConfig):
    """Configuration class for an OpenAI GPT-style model.

    Fixes relative to the obfuscated original: the base class is the imported
    ``PretrainedConfig`` (the previous base name was undefined), the parameters
    have distinct names (duplicates are a SyntaxError), and every value is
    stored on ``self`` so the config actually carries it.
    """

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,
        n_positions=5_12,
        n_embd=7_68,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Construct the configuration; unknown kwargs are forwarded to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 2 |
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase Base16 (hex) string (RFC 3548 section 6).

    Fix: the obfuscated original passed the whole bytes object to ``hex`` instead
    of each byte; the stdlib ``bytes.hex`` does the per-byte work correctly.
    """
    return data.hex().upper()
def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back to bytes.

    Raises:
        ValueError: if the input has an odd length or contains characters outside
            the RFC 3548 uppercase hex alphabet.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 2 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by Monte-Carlo sampling of the unit square and print the result.

    Args:
        iterations: number of random dots to throw.
    """
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte-Carlo estimate of the integral of ``function_to_integrate`` over
    [min_value, max_value]: mean function value times interval width.

    The name is what the two checker functions in this module call.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Sanity-check the Monte-Carlo integrator against y = x, whose exact area
    over [a, b] is (b² - a²) / 2, and print estimate, expectation and error."""
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''')
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print('''******************''')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x²) on [0, 2] (a quarter circle
    of radius 2, whose area is exactly pi) and print the result."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print('''******************''')
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 2 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if ``series`` is an arithmetic progression.

    Fix: the obfuscated original tested ``isinstance(x, x)``; the intended check
    is that the input is a list.

    Raises:
        ValueError: if the input is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    # a single element is trivially arithmetic
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the numbers in ``series``.

    Fix: the obfuscated original tested ``isinstance(x, x)``; also replaced the
    manual accumulation loop with the builtin ``sum``.

    Raises:
        ValueError: if the input is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    return sum(series) / len(series)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 2 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """A basic Transformer block: self-attention, optional cross-attention, feed-forward.

    Reconstructed from the obfuscated original: parameters have distinct names
    (duplicates are a SyntaxError), submodules are stored on ``self``, and the
    method driving computation is named ``forward`` so ``nn.Module.__call__``
    dispatches to it.  Sibling class names (AdaLayerNorm, FeedForward, ...) are
    the ones this body already referenced.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''')
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None (chunked feed-forward disabled)
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''')
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    """Transformer feed-forward layer: activation projection, dropout, linear out.

    Named ``FeedForward`` because ``BasicTransformerBlock`` constructs it under
    that name; the driving method must be ``forward`` for ``nn.Module``.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        # NOTE: the first two branches use `if`/`if` (not elif), mirroring the
        # original control flow exactly.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate='''tanh''')
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    """Linear projection followed by GELU, with an mps float16 workaround.

    Named ``GELU`` because ``FeedForward``/``BasicTransformerBlock`` construct it
    under that name; ``torch.floataa`` in the original was a mangled float32.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16, so round-trip through float32
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    """Gated GELU: a 2x-wide projection split into value and GELU-gated halves.

    Named ``GEGLU`` because ``FeedForward`` constructs it under that name.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so round-trip through float32
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """Linear projection followed by the sigmoid approximation of GELU:
    x * sigmoid(1.702 * x).

    Named ``ApproximateGELU`` because ``FeedForward`` constructs it under that name.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """LayerNorm whose scale/shift are predicted from a timestep embedding.

    Named ``AdaLayerNorm`` because ``BasicTransformerBlock`` constructs it under
    that name; the obfuscated original lost the ``self.`` submodule assignments.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        # affine parameters come from `linear`, so the norm itself is plain
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """adaLN-Zero: LayerNorm modulated by a combined timestep+label embedding,
    additionally returning MSA/MLP gates and shifts.

    Named ``AdaLayerNormZero`` because ``BasicTransformerBlock`` constructs it
    under that name.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1E-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """GroupNorm whose scale/shift are predicted per-channel from an embedding.

    Reconstructed from the obfuscated original, which had lost the ``self.``
    attribute assignments and used duplicate parameter names.
    """

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1E-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        # predicts a scale and a shift per output channel
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # broadcast over the spatial dimensions
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 2 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit Quantum Fourier Transform circuit.

    Fixes relative to the obfuscated original: the type check was
    ``isinstance(x, x)`` (always True for any object), and the local names
    (``qr``, ``cr``, ``quantum_circuit``, ``counter``, ...) that later lines
    use were all lost.  The function name is what the __main__ demo calls.

    Args:
        number_of_qubits: circuit width; must be an exact integer in (0, 10].

    Returns:
        Measurement counts from a 10,000-shot qasm simulation.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')
    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # controlled-phase rotations between the remaining qubit pairs
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    # reverse the qubit order with swaps
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
# Demo: print the simulated counts for a 3-qubit QFT when run as a script.
if __name__ == "__main__":
    print(
        f'Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}'
    )
| 2 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger, download prefix, and the per-model list of checkpoint shards;
# the obfuscated original assigned all three to the same name, clobbering the
# first two.
logger = logging.get_logger(__name__)

PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
    """jukebox-1b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """1b_lyrics/prior_level_2.pth.tar""",
    ],
    """jukebox-5b-lyrics""": [
        """5b/vqvae.pth.tar""",
        """5b/prior_level_0.pth.tar""",
        """5b/prior_level_1.pth.tar""",
        """5b_lyrics/prior_level_2.pth.tar""",
    ],
}
def replace_key(key):
    """Translate one original Jukebox state-dict key to the transformers naming.

    Fix: the obfuscated original's parameter name did not match the ``key`` the
    body reads, so every call raised NameError.  Conditions that only rewrite
    part of the key fall through; conditions that fully determine the new name
    return immediately.
    """
    if key.endswith('''.model.1.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.bias''', '''.conv1d_1.bias''')
    elif key.endswith('''.model.1.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.weight''', '''.conv1d_1.weight''')
    elif key.endswith('''.model.3.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.bias''', '''.conv1d_2.bias''')
    elif key.endswith('''.model.3.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.weight''', '''.conv1d_2.weight''')
    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''', '''conditioner_blocks''')
    if "prime_prior" in key:
        key = key.replace('''prime_prior''', '''encoder''')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''', '''.''')
    if key.endswith('''k'''):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''', '''.codebook''')
    if "y_emb." in key:
        return key.replace('''y_emb.''', '''metadata_embedding.''')
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''', '''embed_tokens''')
    if "prime_state_ln" in key:
        return key.replace('''prime_state_ln''', '''encoder.final_layer_norm''')
    if ".ln" in key:
        return key.replace('''.ln''', '''.layer_norm''')
    if "_ln" in key:
        return key.replace('''_ln''', '''_layer_norm''')
    if "prime_state_proj" in key:
        return key.replace('''prime_state_proj''', '''encoder.proj_in''')
    if "prime_x_out" in key:
        return key.replace('''prime_x_out''', '''encoder.lm_head''')
    if "prior.x_out" in key:
        return key.replace('''x_out''', '''fc_proj_out''')
    if "x_emb" in key:
        return key.replace('''x_emb''', '''embed_tokens''')
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key in ``state_dict`` to its HF Transformers equivalent.

    ``model_state_dict`` is the target model's state dict, used to verify that
    each converted key exists under ``key_prefix`` ("vqvae" or "priors.N") and
    has a matching shape; keys that fail validation are kept under their
    original name and reported on stdout. ``mapping`` is filled in place with
    ``{new_key: original_key}`` for bookkeeping. Returns the renamed dict.
    """
    new_dict = {}
    import re

    # conv-in / resnet / projection patterns of the VQ-VAE encoder ...
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    # ... of the VQ-VAE decoder ...
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    # ... and of the prior's conditioner blocks.
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak an OpenAI Jukebox checkpoint into the transformers design.
    Downloads the original weight shards if they are not cached locally, renames
    every key with `fix_jukebox_keys`, loads them into a `JukeboxModel` and saves
    the converted model plus a `mapping.json` audit trail.
    """
    # Download the original checkpoint shards if they are not already on disk.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        # NOTE(review): renamed target keys restored from the upstream conversion
        # script (".b" -> bias, ".w" -> weight, cond blocks -> model) — confirm.
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        # shard 0 is the VQ-VAE; the remaining shards are the priors, stored top-down.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    # CLI entry point: convert a named Jukebox checkpoint into a local folder.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""jukebox-5b-lyrics""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""jukebox-5b-lyrics-converted""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq (SEW) parameter-name fragment -> HF Transformers parameter-name
# fragment; "*" stands for the encoder layer index and is filled in while
# loading (see recursively_load_weights below).
MAPPING = {
    """post_extract_proj""": """feature_projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.upsample.0""": """encoder.upsample.projection""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy tensor ``value`` (from fairseq key ``full_name``) into the HF model.

    ``key`` is a dotted attribute path that is walked with getattr starting at
    ``hf_pointer``; ``weight_type`` selects the target tensor on the resolved
    module ("weight", "weight_g", "weight_v", "bias") or, when None, the
    resolved module's own ``.data``.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor from ``fairseq_model`` into ``hf_model`` using MAPPING.

    Conv feature-extractor tensors are routed through ``load_conv_layer``; all
    other tensors are matched against MAPPING fragments and written with
    ``set_recursively``. Names that match nothing are logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # fine-tuned models wrap the encoder under "sew." (except the lm_head)
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the component right before the matched fragment is the layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like "...conv_layers.<layer_id>.<type_id>...": type 0 is
    the conv module itself, type 2 its (layer/group) norm. Anything else is
    recorded in ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a `SEWConfig` from a fairseq SEW model's configuration.

    For fine-tuned (CTC) checkpoints the acoustic config lives on the wrapped
    model; a second pass then copies the fine-tuning-only fields from the
    top-level config.
    """
    config = SEWConfig()
    if is_finetuned:
        # fine-tuned checkpoints wrap the acoustic model (fairseq Wav2VecCtc
        # style) — TODO(review): confirm attribute path on the target fairseq version
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    # NOTE: fairseq stores the conv layer spec as a python-literal string;
    # eval() is only acceptable here because the input is a local, trusted checkpoint.
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq SEW checkpoint into the transformers design.
    Loads the fairseq model, derives (or loads) the HF config, optionally builds
    the CTC tokenizer/processor from the fairseq dictionary, copies the weights
    and saves the converted model under ``pytorch_dump_folder_path``.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a fairseq SEW checkpoint to the HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 2 | 1 |
import importlib
import os
import sys
# Make repo-root imports resolvable: test modules are imported by dotted path
# (see get_test_module below), which requires "." on sys.path when the python
# process is started from the root of the repo.
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the dotted module path for a model test file.

    ``test_file`` must look like ``tests/models/<model>/test_modeling_*.py``
    (using the OS path separator). The ``.py`` suffix is stripped and the
    separators become dots. Raises ValueError for any other layout.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the python module corresponding to ``test_file``."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all model tester classes (module attributes ending in `ModelTester`) in ``test_file``."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all test classes in ``test_file`` that define a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the union of `all_model_classes` across all test classes in ``test_file``."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate ``test_class``, run its setUp (if any), and return the class of its `model_tester` (or None)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in ``test_file`` whose `all_model_classes` contains ``model_class``."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes used by the test classes covering ``model_class``."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Return {test class -> its model tester class (or None)} for ``test_file``."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Return {model class -> list of test classes covering it} for ``test_file``."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Return {model class -> list of model tester classes covering it} for ``test_file``."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert ``o`` into a JSON-serializable structure.

    Strings pass through, classes become their names, lists/tuples and dicts
    are converted element-wise; anything else is returned unchanged.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 2 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stub standing in for `PIL.Image` when vision deps are unavailable.

        Only `Image.open` is referenced by the tests below; it is a no-op here
        so that this module can still be imported (the tests themselves are
        skipped by `@require_vision`).
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class lowerCamelCase__(unittest.TestCase):
    """Tests for the zero-shot image-classification pipeline (PT and TF backends)."""

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # Label order is not deterministic for the tiny model, so only the shape
        # and value types are asserted.
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 2 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/cuDNN behavior so the hard-coded expected slices
# in the tests below are reproducible across runs.
enable_full_determinism()
class lowerCamelCase__(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for `KandinskyVaaControlnetPipeline`."""

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00,
            beta_schedule="linear",
            beta_start=0.0_0085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCamelCase__(unittest.TestCase):
    """Slow GPU integration test for the Kandinsky 2.2 depth-controlnet pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # HWC uint8 image -> 1x3xHxW float tensor in [0, 1]
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=1_00,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image)
| 2 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Unit tests for AlignProcessor (BertTokenizer + EfficientNetImageProcessor).

    NOTE(review): local bindings were mangled to `_A` and helper methods to
    `snake_case_`, so reads such as `vocab_tokens`, `self.vocab_file`,
    `self.image_processor_file`, `self.get_tokenizer`, `processor`, `tokenizer`
    and `image_processor` are unresolved names — restore the original bindings
    before running.
    """

    def snake_case_ ( self : Tuple ) -> Optional[int]:
        # setUp: write a throwaway vocab file and image-processor config to a temp dir.
        _A = tempfile.mkdtemp()
        _A = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _A = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        _A = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
        """Slow tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
        """Fast (Rust) tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
        """Image processor loaded from the temp dir."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        # tearDown: drop the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def snake_case_ ( self : int ) -> Optional[Any]:
        """Return a single random 3x30x400 uint8 image converted to PIL."""
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case_ ( self : Dict ) -> List[str]:
        """Round-trip save/load of the processor with slow and fast tokenizers."""
        _A = self.get_tokenizer()
        _A = self.get_rust_tokenizer()
        _A = self.get_image_processor()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : List[Any] ) -> List[str]:
        """Reload with overridden kwargs (special tokens, normalization)."""
        _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        _A = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> List[Any]:
        """processor(images=...) must match the bare image processor output."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        _A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        """processor(text=...) must match the bare tokenizer output."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = processor(text=__lowerCAmelCase )
        _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case_ ( self : List[str] ) -> Any:
        """Joint text+image call returns the expected keys; an empty call raises."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()

    def snake_case_ ( self : Optional[Any] ) -> str:
        """batch_decode must be forwarded to the tokenizer."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _A = processor.batch_decode(__lowerCAmelCase )
        _A = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> str:
        """Output keys must match processor.model_input_names."""
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 1 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( symbol: str = "AAPL" ) -> str:
    """Scrape the current share price of ``symbol`` from Yahoo Finance India.

    Args:
        symbol: ticker symbol to look up (default ``"AAPL"``).

    Returns:
        The price as the raw text of the quote ``<span>``.

    NOTE(review): ``from bsa import BeautifulSoup`` at the top of this file
    looks like a typo for ``bs4`` — confirm before running.
    """
    # The original body referenced an undefined `symbol`; the parameter is now
    # named to match the f-string below.
    url = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    # Yahoo renders the live price inside a div with this exact class string.
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text


# Readable alias: the demo below called `stock_price`, which did not exist.
stock_price = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( PretrainedConfig):
    """Configuration class for the original OpenAI GPT model (``openai-gpt``).

    The base class was previously the undefined name ``_A``, the class
    attributes were both bound to ``a__`` (the second shadowing the first),
    and every ``__init__`` parameter name was mangled so the body raised
    ``NameError``. The names the body reads (``vocab_size`` etc.) and the
    attributes ``PretrainedConfig`` expects (``model_type``,
    ``attribute_map``) are restored here.
    """

    model_type = "openai-gpt"
    # Map the generic config attribute names onto GPT's historical ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,
        n_positions=5_12,
        n_embd=7_68,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ) -> None:
        """Store model hyper-parameters; unknown kwargs go to PretrainedConfig."""
        # Vocabulary / model shape.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn  # activation function name
        # Dropout probabilities.
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary head configuration.
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 2 | 1 |
import itertools
import math
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool:
    """Return True when ``_snake_case`` is a prime number.

    Uses trial division over candidates of the form 6k +/- 1 up to sqrt(n).
    """
    if 1 < _snake_case < 4:
        # 2 and 3 are primes
        return True
    elif _snake_case < 2 or _snake_case % 2 == 0 or _snake_case % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
        if _snake_case % i == 0 or _snake_case % (i + 2) == 0:
            return False
    return True


# Readable alias: the prime generator below calls `is_prime`, which previously
# did not exist.
is_prime = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( ):
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Fixes the original body, which read the undefined names ``_snake_case``
    and ``num`` (the counter was bound to ``_A``).
    """
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


# Readable alias: the nth-prime solution below calls `prime_generator`, which
# previously did not exist.
prime_generator = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int:
    """Return the ``_snake_case``-th prime number (Project Euler #7).

    Fixes the original body, which passed the undefined name ``nth`` to
    ``itertools.islice`` instead of the parameter.
    """
    # islice is 0-indexed, hence the - 1.
    return next(itertools.islice(prime_generator() , _snake_case - 1 , _snake_case ) )


# Readable alias: the banner below called `solution`, which did not exist.
solution = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    print(f'{solution() = }')
| 2 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Config holder / helper for the DeformableDetrImageProcessor tests.

    Restores the names the bodies read (``w``/``h``, ``expected_height``,
    ``expected_width``, ``expected_values``) and the method names the test
    class below calls (``prepare_image_processor_dict``,
    ``get_expected_values``); all were previously mangled, so every call
    raised ``NameError``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        """Store the knobs the image-processor tests read back."""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict( self ):
        """Return kwargs for constructing a DeformableDetrImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values( self , image_inputs , batched=False ):
        """Compute the (height, width) the processor is expected to produce.

        Non-batched: resize so the short side equals ``size["shortest_edge"]``,
        preserving aspect ratio. Batched: per-image values, then the max height
        and max width across the batch (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                # PIL reports (width, height).
                w , h = image.size
            else:
                # Channels-first array/tensor: (C, H, W).
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width


# Readable alias: the test class below instantiates
# `DeformableDetrImageProcessingTester`, which previously did not exist.
DeformableDetrImageProcessingTester = lowerCamelCase__
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    """Tests for DeformableDetrImageProcessor (resize/normalize/pad + COCO targets).

    NOTE(review): the mixin base class is the undefined name `_A`
    (presumably ImageProcessingSavingTestMixin — confirm), local bindings
    were mangled to `_A`, and helper names such as
    `DeformableDetrImageProcessingTester`, `image_processing`,
    `encoded_images`, `expected_height`/`expected_width`, `encoding` and
    `expected_slice` are unresolved — restore the original bindings before
    running.
    """

    # Class under test; None when vision deps are missing so tests skip cleanly.
    a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None

    def snake_case_ ( self : Optional[int] ) -> Any:
        # setUp: build the shared config helper.
        _A = DeformableDetrImageProcessingTester(self )

    @property
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        """Kwargs dict used to construct the processor in every test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Optional[int] ) -> List[str]:
        """The processor exposes all expected configuration attributes."""
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )

    def snake_case_ ( self : List[str] ) -> int:
        """from_dict honors defaults and kwargs overrides (size, pad flag)."""
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
        _A = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )

    def snake_case_ ( self : Any ) -> Union[str, Any]:
        # Intentionally skipped test slot.
        pass

    def snake_case_ ( self : List[str] ) -> Optional[int]:
        """PIL inputs: output shapes match the tester's expected sizes."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Tuple ) -> int:
        """NumPy inputs: output shapes match the tester's expected sizes."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Optional[Any] ) -> int:
        """PyTorch inputs: output shapes match the tester's expected sizes."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        """COCO detection annotations are encoded into the expected target dict."""
        # prepare image and target
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        _A = DeformableDetrImageProcessor()
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )

    @slow
    def snake_case_ ( self : List[str] ) -> List[str]:
        """COCO panoptic annotations (with masks) are encoded as expected."""
        # prepare image, target and masks_path
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        _A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify masks
        _A = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
# Spanish NIF (national ID) checksum validation.
# Restores the constant names the function reads; previously both constants
# were bound to `UpperCAmelCase_` (the second shadowing the first) and the
# body referenced undefined names throughout.
NUMBERS_PLUS_LETTER = """Input must be a string of 8 numbers plus letter"""
LOOKUP_LETTERS = """TRWAGMYFPDXBNJZSQVHLCKE"""


def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bool:
    """Return True when ``_snake_case`` is a valid Spanish NIF (8 digits + check letter).

    Dashes are ignored and the letter is case-insensitive.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if it is not 8 digits followed by one letter.
    """
    if not isinstance(_snake_case , str ):
        msg = F'''Expected string as input, found {type(_snake_case ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = _snake_case.replace('''-''' , '''''' ).upper()
    # Length is checked on the cleaned string so dashed input is accepted.
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    # The check letter is the lookup-table entry at (number mod 23).
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 |
# Dutch national flag problem: single-pass three-way partition of 0s, 1s, 2s.
# Restores the constant names the function reads; previously all four were
# bound to `UpperCAmelCase_` and `colors` was built from undefined names.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list:
    """Sort a sequence containing only the values 0, 1 and 2 in place and return it.

    Dijkstra's three-pointer partition: everything left of ``low`` is red,
    everything right of ``high`` is blue, ``mid`` scans the unknown region.

    Raises:
        ValueError: if the sequence contains a value other than 0, 1 or 2.
    """
    if not _snake_case:
        return []
    if len(_snake_case ) == 1:
        return list(_snake_case )
    low = 0
    high = len(_snake_case ) - 1
    mid = 0
    while mid <= high:
        if _snake_case[mid] == colors[0]:
            _snake_case[low], _snake_case[mid] = _snake_case[mid], _snake_case[low]
            low += 1
            mid += 1
        elif _snake_case[mid] == colors[1]:
            mid += 1
        elif _snake_case[mid] == colors[2]:
            _snake_case[mid], _snake_case[high] = _snake_case[high], _snake_case[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg )
    return _snake_case


# Readable alias: the demo below called `dutch_national_flag_sort`, which did
# not exist.
dutch_national_flag_sort = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(f'{dutch_national_flag_sort(unsorted)}')
| 2 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Config holder for the PoolFormerImageProcessor tests.

    Restores the local/attribute names the bodies read (``size``,
    ``crop_size``, etc.) and the method name the test class below calls
    (``prepare_image_processor_dict``); all were previously mangled, so
    ``__init__`` raised ``NameError``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        """Store the knobs the image-processor tests read back."""
        # Fall back to the processor's own defaults when not overridden.
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):
        """Return kwargs for constructing a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


# Readable alias: the test class below instantiates
# `PoolFormerImageProcessingTester`, which previously did not exist.
PoolFormerImageProcessingTester = lowerCamelCase__
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    """Tests for PoolFormerImageProcessor (resize + center crop + normalize).

    NOTE(review): the mixin base class is the undefined name `_A`
    (presumably ImageProcessingSavingTestMixin — confirm), local bindings
    were mangled to `_A`, and names such as
    `PoolFormerImageProcessingTester`, `image_processing`, `image_inputs`
    and `encoded_images` are unresolved — restore the original bindings
    before running.
    """

    # Class under test; None when vision deps are missing so tests skip cleanly.
    a__ : Dict = PoolFormerImageProcessor if is_vision_available() else None

    def snake_case_ ( self : List[str] ) -> Optional[int]:
        # setUp: build the shared config helper.
        _A = PoolFormerImageProcessingTester(self )

    @property
    def snake_case_ ( self : List[Any] ) -> List[Any]:
        """Kwargs dict used to construct the processor in every test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Optional[int] ) -> List[str]:
        """The processor exposes all expected configuration attributes."""
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''crop_pct''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )

    def snake_case_ ( self : List[Any] ) -> Optional[int]:
        """from_dict honors defaults and kwargs overrides (size, crop_size)."""
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def snake_case_ ( self : List[Any] ) -> Union[str, Any]:
        # Intentionally skipped test slot.
        pass

    def snake_case_ ( self : List[str] ) -> List[Any]:
        """PIL inputs: output shape equals (N, C, crop_h, crop_w)."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case_ ( self : Any ) -> int:
        """NumPy inputs: output shape equals (N, C, crop_h, crop_w)."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case_ ( self : int ) -> List[str]:
        """PyTorch inputs: output shape equals (N, C, crop_h, crop_w)."""
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 2 |
import itertools
import math
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool:
    """Return True when ``_snake_case`` is a prime number.

    Uses trial division over candidates of the form 6k +/- 1 up to sqrt(n).
    """
    if 1 < _snake_case < 4:
        # 2 and 3 are primes
        return True
    elif _snake_case < 2 or _snake_case % 2 == 0 or _snake_case % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
        if _snake_case % i == 0 or _snake_case % (i + 2) == 0:
            return False
    return True


# Readable alias: the prime generator below calls `is_prime`, which previously
# did not exist.
is_prime = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( ):
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Fixes the original body, which read the undefined names ``_snake_case``
    and ``num`` (the counter was bound to ``_A``).
    """
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


# Readable alias: the nth-prime solution below calls `prime_generator`, which
# previously did not exist.
prime_generator = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int:
    """Return the ``_snake_case``-th prime number (Project Euler #7).

    Fixes the original body, which passed the undefined name ``nth`` to
    ``itertools.islice`` instead of the parameter.
    """
    # islice is 0-indexed, hence the - 1.
    return next(itertools.islice(prime_generator() , _snake_case - 1 , _snake_case ) )


# Readable alias: the banner below called `solution`, which did not exist.
solution = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    print(f'{solution() = }')
| 2 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
    def snake_case_ ( self : Optional[Any] ) -> Optional[Any]:
        """setUp: build a BlipProcessor from tiny components and save it to a temp dir."""
        # NOTE(review): local bindings were mangled to `_A`; the later reads of
        # `processor` (and the two positional args below) are unresolved names —
        # restore the original bindings before running.
        _A = tempfile.mkdtemp()
        _A = BlipImageProcessor()
        _A = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
        _A = BlipProcessor(__lowerCAmelCase , __lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case_ ( self : Tuple , **__lowerCAmelCase : Tuple ) -> str:
        """Reload the saved processor from the temp dir and return its tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer
def snake_case_ ( self : List[Any] , **__lowerCAmelCase : int ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def snake_case_ ( self : Any ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : List[Any] ) -> str:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : Any ) -> Union[str, Any]:
_A = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_A = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = processor(text=__lowerCAmelCase )
_A = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def snake_case_ ( self : Any ) -> List[str]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__lowerCAmelCase )
_A = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : List[str] ) -> List[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 2 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case ):
    """Return the sorted, "_and_"-joined backend names guarded by an
    ``if not is_xxx_available()`` line, or None when the line is no such guard.

    Fixes the previous body, which stored the backend list in ``_A`` but then
    sorted an undefined ``backends`` and joined the *input line* instead of the
    backend list. The annotations referencing never-imported typing names
    (``Optional``/``Any``) are dropped, since they made the ``def`` itself raise.

    NOTE(review): relies on the module-level regexes ``_re_test_backend`` and
    ``_re_backend`` — in this file those constants are name-mangled; confirm
    they are restored.
    """
    if _re_test_backend.search(_snake_case) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(_snake_case)]
    backends.sort()
    return "_and_".join(backends)
def SCREAMING_SNAKE_CASE_ ( _snake_case ):
    """Parse a lazy ``__init__.py`` and return its two halves.

    Returns ``(import_dict_objects, type_hint_objects)``, each a dict mapping a
    backend name ("none" for no backend) to the list of objects declared in the
    ``_import_structure`` block and in the ``TYPE_CHECKING`` block respectively,
    or None when the init is a traditional (non-lazy) one.

    This restores the local names that were mangled to ``_A`` — the previous
    body immediately read undefined names such as ``lines``/``objects``/
    ``backend`` — and drops the ``:Any`` annotations (``Any`` is not imported).

    NOTE(review): relies on the module-level ``_re_*`` regexes and on a
    ``find_backend`` helper; both are name-mangled in this file — confirm they
    are restored.
    """
    with open(_snake_case, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( import_dict_objects , type_hint_objects ):
    """Compare the two halves of a lazy init and return a list of error strings.

    Args:
        import_dict_objects: backend -> objects declared in ``_import_structure``.
        type_hint_objects: backend -> objects declared under ``TYPE_CHECKING``.

    Returns:
        A (possibly empty) list of human-readable discrepancy descriptions.

    The original signature declared two parameters both named ``_snake_case``,
    which is a SyntaxError in Python, and the body bound every value to ``_A``
    while reading the real names (``errors``, ``duplicate_imports``, ``name``);
    both defects are fixed here by restoring proper names.
    """

    def find_duplicates(seq):
        # Elements that occur more than once, in first-seen order.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # The two halves must declare exactly the same backends (in the same order).
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def SCREAMING_SNAKE_CASE_ ( path_to_transformers="src/transformers" ):
    """Walk every ``__init__.py`` under *path_to_transformers* and check the two
    halves of each lazy init declare the same objects.

    Args:
        path_to_transformers: root of the source tree to walk. The parameter
            (with the upstream default) replaces a read of the undefined global
            ``_snake_case``; the mangled ``_A`` locals are restored too.

    Raises:
        ValueError: describing every init whose two halves disagree.

    NOTE(review): calls ``parse_init``/``analyze_results``, which exist in this
    file only under the mangled name ``SCREAMING_SNAKE_CASE_`` — confirm those
    names are restored before running the check.
    """
    failures = []
    for root, _, files in os.walk(path_to_transformers):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def SCREAMING_SNAKE_CASE_ ( path_to_transformers="src/transformers" ):
    """Return the dotted names of all Transformers submodules under the root:
    every non-private package folder that still contains Python files, plus the
    top-level ``.py`` modules (``__init__.py`` excluded).

    Args:
        path_to_transformers: root of the source tree to walk. The parameter
            (with the upstream default) replaces a read of the undefined global
            ``_snake_case``; the mangled ``_A`` locals are restored too.

    Also fixes a mutation-while-iterating bug: the old loop removed private
    folders from ``directories`` while iterating it, which silently skipped the
    sibling following each removed folder. We now iterate a copy and still
    remove from the real list (os.walk honors in-place removal for pruning).
    """
    submodules = []
    for path, directories, files in os.walk(path_to_transformers):
        for folder in list(directories):
            # Ignore private modules (and prune them from the walk).
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(path_to_transformers))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(path_to_transformers))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules; deeper files are covered by their package.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from `_import_structure` in the root init.
# The list was mangled to `UpperCAmelCase_`, leaving the name the submodule
# check actually reads (`IGNORE_SUBMODULES`) undefined — restored here.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def SCREAMING_SNAKE_CASE_ ( path_to_transformers="src/transformers" ):
    """Check every submodule of Transformers is registered in the root init's
    ``_import_structure`` (or explicitly ignored).

    Args:
        path_to_transformers: root of the source tree. The parameter (with the
            upstream default) replaces reads of the undefined global
            ``_snake_case``; the mangled ``_A`` locals are restored too.

    Raises:
        ValueError: listing every unregistered submodule.

    NOTE(review): calls ``get_transformers_submodules`` and reads
    ``IGNORE_SUBMODULES``, which exist in this file only under mangled names —
    confirm those names are restored before running the check.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(path_to_transformers)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(path_to_transformers, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
    # NOTE(review): neither `check_all_inits` nor `check_submodules` is defined under
    # those names in this module — every function above is named
    # `SCREAMING_SNAKE_CASE_` — so running this file raises NameError as written.
    check_all_inits()
    check_submodules()
| 2 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# HTTP headers for the Google image-search request: spoof a desktop Chrome/Edge
# user agent so Google returns the full image-search markup.
# NOTE(review): the constant name looks machine-mangled (upstream likely calls it
# `headers`); nothing below references `UpperCAmelCase_` directly — confirm.
UpperCAmelCase_ = {
    """User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
    """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def SCREAMING_SNAKE_CASE_ ( query: str = "dhaka" , max_images: int = 5 ) -> int:
    """Download up to ``max_images`` Google Images results for ``query`` into a
    ``query_<query>`` folder and return the number of images downloaded.

    The original signature declared both parameters as ``_snake_case`` (a
    SyntaxError); they are restored to ``query``/``max_images``, and the ``_A``
    bindings (the body read undefined names and passed the wrong values to
    ``re.findall``/``requests.get``) are restored as well.

    NOTE(review): the module imports ``BeautifulSoup`` from ``bsa`` — upstream
    this is the third-party ``bs4`` package; confirm and fix that import line.
    NOTE(review): as upstream, if Google returns matches but no full-resolution
    URLs, the final ``return index`` raises NameError — confirm intended.
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    # Local copy of the request headers (desktop user agent), so this function
    # does not depend on the mangled module-level constant.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    # The image metadata lives inside AF_initDataCallback(...) script payloads.
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",", matched_images_data_json, )
    if not matched_google_image_data:
        return 0
    # Strip the low-resolution thumbnail entries, keeping full-resolution URLs.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]", "", str(matched_google_image_data), )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]", removed_matched_google_images_thumbnails, )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # URLs are double-escaped in the payload, so decode unicode escapes twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
    # NOTE(review): `download_images_from_google_query` and `image_count` are not
    # defined under those names in this module (the function above is named
    # `SCREAMING_SNAKE_CASE_` and its result is bound to `UpperCAmelCase_`), so this
    # block raises NameError as written; restore the original names before running.
    try:
        UpperCAmelCase_ = download_images_from_google_query(sys.argv[1])
        print(f'{image_count} images were downloaded to disk.')
    except IndexError:
        print("""Please provide a search term.""")
        raise
| 2 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline):
    """Image-to-text pipeline: generates a caption / text for an input image.

    Restorations in this block: the decorator argument and base class were the
    undefined name ``_A`` (now ``PIPELINE_INIT_ARGS`` and ``Pipeline``, both
    imported above); several signatures declared duplicate ``__lowerCAmelCase``
    parameters (a SyntaxError) and are renamed; the ``_A`` locals are restored.

    NOTE(review): the four hook methods are all named ``snake_case_`` —
    upstream these are ``_sanitize_parameters``, ``preprocess``, ``_forward``
    and ``postprocess``. Python keeps only the last definition of a repeated
    name, so the ``Pipeline`` base class cannot find its hooks until the
    upstream method names are restored.
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        # Vision-to-sequence models only; pick the mapping matching the framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def snake_case_ ( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        # upstream: _sanitize_parameters — split user kwargs into per-stage dicts.
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """Generate text for the image(s) passed as inputs (see base ``Pipeline``)."""
        return super().__call__(images , **kwargs )

    def snake_case_ ( self , image , prompt=None ):
        # upstream: preprocess — turn an image (and optional prompt) into model inputs.
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt )} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                # GIT expects the prompt ids prefixed with the CLS token.
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation" )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def snake_case_ ( self , model_inputs , generate_kwargs=None ):
        # upstream: _forward
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs

    def snake_case_ ( self , model_outputs ):
        # upstream: postprocess — decode each generated sequence into text.
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 2 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Configuration holder that fabricates ``BeitImageProcessor`` kwargs for the
    image-processing tests below.

    The original ``__init__`` declared every parameter as ``__lowerCAmelCase``
    (duplicate parameter names are a SyntaxError) and bound every attribute to
    ``_A``; the names are restored from how the body and the downstream tests
    read them. Mutable list defaults are replaced by the None-sentinel idiom
    with the same effective values.

    NOTE(review): the method below is named ``snake_case_`` — upstream it is
    ``prepare_image_processor_dict`` and the downstream test class calls it by
    that name; restore the upstream name to make the suite runnable.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_reduce_labels = do_reduce_labels

    def snake_case_ ( self ):
        # upstream: prepare_image_processor_dict
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def SCREAMING_SNAKE_CASE_ ( ):
    """Return one ``(image, segmentation_map)`` PIL pair from the ADE20k fixtures.

    Fixes the previous body, which bound both opened images to ``_A`` and then
    returned the undefined names ``image``/``map``; the bogus ``-> Tuple``
    annotation (``Tuple`` is never imported) is dropped.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    seg_map = Image.open(dataset[1]["file"] )
    return image, seg_map
def SCREAMING_SNAKE_CASE_ ( ):
    """Return two images and two segmentation maps from the ADE20k fixtures as
    ``([image1, image2], [map1, map2])``.

    Fixes the previous body, which bound all four opened images to ``_A`` and
    returned duplicated undefined names (``[imagea, imagea], [mapa, mapa]``);
    the bogus return annotation is dropped.
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image_a = Image.open(ds[0]["file"] )
    map_a = Image.open(ds[1]["file"] )
    image_b = Image.open(ds[2]["file"] )
    map_b = Image.open(ds[3]["file"] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class lowerCamelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase):
    """Tests for ``BeitImageProcessor`` (resize/center-crop/normalize and
    segmentation-map handling).

    Restorations in this block: the base class was the undefined name ``_A``
    (now ``ImageProcessingSavingTestMixin``, imported above); the class
    attribute the mixin reads was mangled to ``a__`` (now
    ``image_processing_class``); and the ``_A`` locals are restored.

    NOTE(review): the methods are all named ``snake_case_`` (upstream: setUp,
    image_processor_dict, test_image_processor_properties, ...,
    test_reduce_labels), so only the last definition survives; and
    ``BeitImageProcessingTester`` / ``prepare_semantic_*_inputs`` are
    referenced by their upstream names although the definitions above are also
    mangled. Restore those names to make the suite runnable.
    """

    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def snake_case_ ( self ):
        # upstream: setUp
        self.image_processor_tester = BeitImageProcessingTester(self )

    @property
    def snake_case_ ( self ):
        # upstream: image_processor_dict
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self ):
        # upstream: test_image_processor_properties
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )

    def snake_case_ ( self ):
        # upstream: test_image_processor_from_dict_with_kwargs
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )

    def snake_case_ ( self ):
        # upstream: test_batch_feature (intentionally empty)
        pass

    def snake_case_ ( self ):
        # upstream: test_call_pil
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def snake_case_ ( self ):
        # upstream: test_call_numpy
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def snake_case_ ( self ):
        # upstream: test_call_pytorch
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def snake_case_ ( self ):
        # upstream: test_call_segmentation_maps
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors and matching all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test batched
        encoding = image_processing(image_inputs , maps , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images , segmentation_maps , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

    def snake_case_ ( self ):
        # upstream: test_reduce_labels
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 150 )

        # With reduce_labels enabled, background (0) maps to the 255 ignore index.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , segmentation_map , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
| 2 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
    """Scrape the current price of ticker ``_snake_case`` from Yahoo Finance India.

    Fixes the previous body, which bound every intermediate to ``_A`` and then
    fetched the raw ticker symbol with ``requests.get`` (instead of the quote
    URL) and read the undefined names ``url``/``class_``.

    NOTE(review): the module imports ``BeautifulSoup`` from ``bsa`` — upstream
    this is the third-party ``bs4`` package; confirm and fix that import line.
    """
    url = f"https://in.finance.yahoo.com/quote/{_snake_case}?s={_snake_case}"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    # CSS class of the <div> wrapping the live-price <span> on the quote page.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
    # NOTE(review): `stock_price` is not defined under that name in this module (the
    # function above is named `SCREAMING_SNAKE_CASE_`), so this loop raises NameError
    # as written; restore the original function name before running the script.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Configuration holder used by the ImageGPT image-processor tests.

    Stores the image dimensions / processor flags the tests need and builds the
    kwargs dict for the processor under test.  The mangled original gave every
    ``__init__`` parameter the same name (a SyntaxError) and dropped all
    attribute assignments; the conventional tester signature is restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        # Default target size matches the original hard-coded fallback.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the processor under test."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
                    [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }

    # Backward-compatible alias for the mangled original method name.
    snake_case_ = prepare_image_processor_dict
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    """Unit tests for the ImageGPT image processor: attribute presence and
    JSON / save_pretrained round-trips.

    NOTE(review): this class is machine-mangled -- the mixin base is named
    ``_A``, left-hand names collapse to ``_A``, and bodies read undefined
    names (``image_processor``, ``obj``, ``image_processor_first``,
    ``__lowerCAmelCase``); the original locals must be restored to run it.
    """
    a__ : Union[str, Any] = ImageGPTImageProcessor if is_vision_available() else None

    def snake_case_ ( self : List[str] ) -> str:
        # Build the shared tester fixture (original name: setUp).
        _A = ImageGPTImageProcessingTester(self )

    @property
    def snake_case_ ( self : List[Any] ) -> int:
        # Processor kwargs dict shared by all tests below.
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
        # Processor exposes the expected configuration attributes.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''clusters''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )

    def snake_case_ ( self : List[Any] ) -> Any:
        # from_dict honours both the stored size and a ``size=`` override.
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def snake_case_ ( self : str ) -> Optional[int]:
        # to_json_string round-trips every config key (clusters via array_equal).
        _A = self.image_processing_class(**self.image_processor_dict )
        _A = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(__lowerCAmelCase , obj[key] ) )
            else:
                self.assertEqual(obj[key] , __lowerCAmelCase )

    def snake_case_ ( self : int ) -> Tuple:
        # to_json_file / from_json_file round-trip.
        _A = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _A = os.path.join(__lowerCAmelCase , '''image_processor.json''' )
            image_processor_first.to_json_file(__lowerCAmelCase )
            _A = self.image_processing_class.from_json_file(__lowerCAmelCase ).to_dict()
            _A = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(__lowerCAmelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , __lowerCAmelCase )

    def snake_case_ ( self : Dict ) -> int:
        # save_pretrained / from_pretrained round-trip.
        _A = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(__lowerCAmelCase )
            _A = self.image_processing_class.from_pretrained(__lowerCAmelCase ).to_dict()
            _A = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(__lowerCAmelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , __lowerCAmelCase )

    @unittest.skip('''ImageGPT requires clusters at initialization''' )
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        # Intentionally skipped: the common init-without-params test cannot apply.
        pass
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    # Load two fixture images from the HF test dataset and return them as a
    # list (used by the batched integration test below).
    # NOTE(review): left-hand names were mangled to ``_A``; ``dataset``,
    # ``imagea`` and ``images`` are undefined as written -- restore the
    # original names (dataset, image1, image2, images) before running.
    _A = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    _A = Image.open(dataset[4]['''file'''] )
    _A = Image.open(dataset[5]['''file'''] )
    _A = [imagea, imagea]
    return images
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test: the pretrained ImageGPT processor must produce
    the known token ids for the fixture images (single image and batch).

    NOTE(review): machine-mangled -- ``image_processing``, ``images``,
    ``encoding``, ``expected_ids`` and ``__lowerCAmelCase`` are undefined as
    written; the original locals must be restored before running.
    """
    @slow
    def snake_case_ ( self : Optional[Any] ) -> str:
        _A = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
        _A = prepare_images()
        # test non-batched
        _A = image_processing(images[0] , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        # first three cluster ids of image 0 (reference values)
        _A = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , __lowerCAmelCase )
        # test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        # last three cluster ids of image 1 (reference values)
        _A = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , __lowerCAmelCase )
| 2 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Check that ``kruskal`` returns the known minimum spanning tree.

    Bug fix: the mangled original rebound every local to ``_A`` and then
    called ``kruskal(_snake_case, _snake_case)`` with undefined names; the
    locals are restored here.  ``kruskal`` is expected to take the vertex
    count and an edge list of ``[u, v, weight]`` triples -- TODO confirm
    against ``graphs.minimum_spanning_tree_kruskal``.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # The order of emitted MST edges is implementation-defined; compare sorted.
    assert sorted(result) == sorted(expected)
| 2 | 1 |
import datasets
UpperCAmelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCAmelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCAmelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def SCREAMING_SNAKE_CASE_(preds, labels) -> float:
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are array-likes supporting element-wise ``==`` whose result
    has a ``.mean()`` (e.g. numpy arrays).  Bug fix: the mangled original gave
    both parameters the same name (a SyntaxError) while the body read the
    undefined globals ``preds``/``labels``; the obvious intent is restored.
    """
    return (preds == labels).mean()
# NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION`` and
# ``simple_accuracy`` are undefined -- the module constants above were all
# rebound to ``UpperCAmelCase_`` and the helper was renamed; restore the
# original names before this metric can load.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """XNLI metric: plain accuracy over integer (or float for sts-b) labels."""
    def snake_case_ ( self : Union[str, Any] ) -> Tuple:
        # Declare the metric's features/metadata (original name: _info).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )

    def snake_case_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> Optional[int]:
        # Compute accuracy over (predictions, references).
        return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
| 2 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the sum of the proper divisors of ``_snake_case``.

    >>> SCREAMING_SNAKE_CASE_(6)
    6
    >>> SCREAMING_SNAKE_CASE_(12)
    16
    >>> SCREAMING_SNAKE_CASE_(1)
    0

    Raises:
        ValueError: if the input is not an int or is not positive.
    """
    # Bug fix: the original tested ``isinstance(x, x)`` (always a TypeError at
    # runtime) and then compared an undefined name ``input_num``.
    if not isinstance(_snake_case, int):
        raise ValueError('''Input must be an integer''')
    if _snake_case <= 0:
        raise ValueError('''Input must be positive''')
    # A proper divisor can be at most half of the number.
    return sum(
        divisor for divisor in range(1, _snake_case // 2 + 1) if _snake_case % divisor == 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
UpperCAmelCase_ = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
UpperCAmelCase_ = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
UpperCAmelCase_ = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
UpperCAmelCase_ = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
UpperCAmelCase_ = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
UpperCAmelCase_ = """"""
UpperCAmelCase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
UpperCAmelCase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
UpperCAmelCase_ = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
    '''readme_md, expected_dict''' , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Tuple ) -> List[str]:
    # Parsing a correct README string must yield the expected dict.
    # NOTE(review): mangled -- duplicate ``_snake_case`` params (SyntaxError)
    # and the parametrize constants / ``expected_dict`` are undefined because
    # the module constants were all rebound to ``UpperCAmelCase_``.
    assert ReadMe.from_string(_snake_case , _snake_case ).to_dict() == expected_dict
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :Any ) -> Union[str, Any]:
    # Each malformed README must raise on validate() with the matching message.
    # NOTE(review): mangled -- duplicate params (SyntaxError), undefined
    # constants / ``expected_error`` / ``readme``; restore original names.
    with pytest.raises(_snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ):
        _A = ReadMe.from_string(_snake_case , _snake_case )
        readme.validate()
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Union[str, Any] ) -> Tuple:
    # Duplicate headings must raise already at parse time (from_string).
    # NOTE(review): mangled -- duplicate params (SyntaxError) and undefined
    # constants / ``expected_error``; restore original names before running.
    with pytest.raises(_snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ):
        ReadMe.from_string(_snake_case , _snake_case )
@pytest.mark.parametrize(
    '''readme_md,''' , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Union[str, Any]:
    # With suppress_parsing_errors=True the duplicate-heading README must not raise.
    # NOTE(review): mangled -- ``_snake_case`` is reused for every argument;
    # restore the original (readme_md, example_yaml_structure, True) call.
    ReadMe.from_string(_snake_case , _snake_case , suppress_parsing_errors=_snake_case )
@pytest.mark.parametrize(
    '''readme_md, expected_dict''' , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Dict ) -> Optional[Any]:
    # Writing the README to disk and parsing via from_readme must match the
    # expected structure (root name = file path, empty root text).
    # NOTE(review): mangled -- duplicate params (SyntaxError); ``out`` and
    # ``path`` are undefined as written; restore original locals.
    with tempfile.TemporaryDirectory() as tmp_dir:
        _A = Path(_snake_case ) / '''README.md'''
        with open(_snake_case , '''w+''' ) as readme_file:
            readme_file.write(_snake_case )
        _A = ReadMe.from_readme(_snake_case , _snake_case ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :str ) -> Tuple:
    # File-based variant: validate() on each malformed README must raise with
    # the expected message (error text includes the on-disk path).
    # NOTE(review): mangled -- duplicate params (SyntaxError), undefined
    # constants / ``expected_error`` / ``readme``; restore original names.
    with tempfile.TemporaryDirectory() as tmp_dir:
        _A = Path(_snake_case ) / '''README.md'''
        with open(_snake_case , '''w+''' ) as readme_file:
            readme_file.write(_snake_case )
        _A = expected_error.format(path=_snake_case )
        with pytest.raises(_snake_case , match=re.escape(_snake_case ) ):
            _A = ReadMe.from_readme(_snake_case , _snake_case )
            readme.validate()
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :str ) -> Optional[Any]:
    # File-based variant: duplicate headings must raise at parse time.
    # NOTE(review): mangled -- duplicate params (SyntaxError) and undefined
    # constants / ``expected_error``; restore original names before running.
    with tempfile.TemporaryDirectory() as tmp_dir:
        _A = Path(_snake_case ) / '''README.md'''
        with open(_snake_case , '''w+''' ) as readme_file:
            readme_file.write(_snake_case )
        _A = expected_error.format(path=_snake_case )
        with pytest.raises(_snake_case , match=re.escape(_snake_case ) ):
            ReadMe.from_readme(_snake_case , _snake_case )
@pytest.mark.parametrize(
    '''readme_md,''' , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> int:
    # File-based variant: suppress_parsing_errors=True must swallow the
    # duplicate-heading parse error.
    # NOTE(review): mangled -- ``_snake_case`` is reused for every argument;
    # restore the original (readme_md, yaml structure, True) call.
    with tempfile.TemporaryDirectory() as tmp_dir:
        _A = Path(_snake_case ) / '''README.md'''
        with open(_snake_case , '''w+''' ) as readme_file:
            readme_file.write(_snake_case )
        ReadMe.from_readme(_snake_case , _snake_case , suppress_parsing_errors=_snake_case )
| 2 |
UpperCAmelCase_ = 2_5_6
# Modulus to hash a string
UpperCAmelCase_ = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE_(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs as a substring of ``text`` (Rabin-Karp).

    Bug fixes over the mangled original: both parameters were named
    ``_snake_case`` (a SyntaxError) while the body read ``pattern``/``text``,
    and the module-level hash constants had been renamed away -- they are now
    bound locally.
    """
    alphabet_size = 256  # hash base: one "digit" per extended-ASCII code point
    modulus = 1_000_003  # large prime keeps hash collisions rare
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Hash the pattern and the first text window; modulus_power ends up as
    # alphabet_size ** (p_len - 1) % modulus, used to drop the leading char.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        # A hash match is only a candidate: confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Rolling hash (https://en.wikipedia.org/wiki/Rolling_hash):
        # remove text[i], shift left, append text[i + p_len].
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    # Self-test for the Rabin-Karp matcher defined above.
    # NOTE(review): this definition shadows the matcher (both are named
    # ``SCREAMING_SNAKE_CASE_``), the asserts call an undefined ``rabin_karp``
    # with undefined ``_snake_case`` arguments, and every local is rebound to
    # ``_A`` -- the original names (pattern, text1, text2, ...) must be
    # restored before this can run.
    _A = '''abc1abc12'''
    _A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    _A = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
    # Test 2)
    _A = '''ABABX'''
    _A = '''ABABZABABYABABX'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 3)
    _A = '''AAAB'''
    _A = '''ABAAAAAB'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 4)
    _A = '''abcdabcy'''
    _A = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 5) non-ASCII pattern must match by code point
    _A = '''Lü'''
    _A = '''Lüsai'''
    assert rabin_karp(_snake_case , _snake_case )
    _A = '''Lue'''
    assert not rabin_karp(_snake_case , _snake_case )
    print('''Success.''' )


if __name__ == "__main__":
    # NOTE(review): ``test_rabin_karp`` is undefined (the function above was renamed).
    test_rabin_karp()
| 2 | 1 |
import os

# Lint script: flags project file names containing uppercase letters, spaces,
# hyphens, or sitting outside any directory, and exits non-zero if any found.
# NOTE(review): every result below is rebound to ``UpperCAmelCase_`` while the
# checks read ``filepaths`` / ``upper_files`` / ``space_files`` / ... which
# are therefore undefined; restore the original names for the script to run.
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

UpperCAmelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCAmelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'{len(upper_files)} files contain uppercase characters:')
    print("""\n""".join(upper_files) + """\n""")
UpperCAmelCase_ = [file for file in filepaths if """ """ in file]
if space_files:
    print(f'{len(space_files)} files contain space characters:')
    print("""\n""".join(space_files) + """\n""")
UpperCAmelCase_ = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(f'{len(hyphen_files)} files contain hyphen characters:')
    print("""\n""".join(hyphen_files) + """\n""")
UpperCAmelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'{len(nodir_files)} files are not in a directory:')
    print("""\n""".join(nodir_files) + """\n""")
UpperCAmelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 2 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case ) -> set:
    """Return the set of adjacent symbol pairs in ``_snake_case``.

    Works on any non-empty indexable sequence of symbols (a word as a string,
    or a tuple of sub-tokens as used by a BPE loop).  Bug fix: the mangled
    original rebound every local to ``_A`` and then read the undefined names
    ``word``/``pairs``/``prev_char``; they are restored as locals here.
    """
    word = _snake_case
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """Speech2Text2 BPE tokenizer: decode-capable always, encode-capable only
    when a merges file is supplied.

    NOTE(review): machine-mangled throughout -- the base class is named
    ``_A``, several signatures repeat ``__lowerCAmelCase`` for every parameter
    (a SyntaxError), left-hand names collapse to ``_A``, and bodies read the
    original (now-undefined) locals.  Comments below describe the evident
    intent; the original names must be restored before use.
    """
    a__ : Dict = VOCAB_FILES_NAMES
    a__ : str = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[Any] = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
        # Original signature (to restore): vocab_file, bos_token, pad_token,
        # eos_token, unk_token, do_lower_case, merges_file, **kwargs.
        super().__init__(
            unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
        _A = do_lower_case
        # Load token -> id mapping from vocab.json; decoder is the inverse map.
        with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            _A = json.load(__lowerCAmelCase )
        _A = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Without merges the tokenizer can decode but not encode.
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            _A = None
            _A = None
        else:
            # Parse merges.txt into bpe_ranks (merge pair -> priority) and an
            # empty per-token BPE cache.
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
                _A = merges_handle.read().split('''\n''' )[:-1]
            _A = [tuple(merge.split()[:2] ) for merge in merges]
            _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
            _A = {}

    @property
    def snake_case_ ( self : List[str] ) -> int:
        # Vocabulary size.
        return len(self.decoder )

    def snake_case_ ( self : Dict ) -> Dict:
        # Full vocabulary including added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
        # Byte-pair-encode one token, repeatedly merging the lowest-ranked
        # adjacent pair; results are cached per token.
        _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        _A = get_pairs(__lowerCAmelCase )
        if not pairs:
            return token
        while True:
            # Pick the pair with the best (lowest) merge rank.
            _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _A , _A = bigram
            _A = []
            _A = 0
            while i < len(__lowerCAmelCase ):
                try:
                    _A = word.index(__lowerCAmelCase , __lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _A = j
                if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _A = tuple(__lowerCAmelCase )
            _A = new_word
            if len(__lowerCAmelCase ) == 1:
                break
            else:
                _A = get_pairs(__lowerCAmelCase )
        _A = ''' '''.join(__lowerCAmelCase )
        # Special-case newline + merge marker, strip a trailing marker, and
        # turn internal spaces into the continuation marker ("@@ ").
        if word == "\n " + BPE_TOKEN_MERGES:
            _A = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(__lowerCAmelCase ):
            _A = word.replace(__lowerCAmelCase , '''''' )
        _A = word.replace(''' ''' , __lowerCAmelCase )
        _A = word
        return word

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
        # Tokenize raw text into BPE sub-tokens; requires a merges file.
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            _A = text.lower()
        _A = text.split()
        _A = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
        return split_tokens

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
        # token -> id (falls back to the unk token's id).
        return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )

    def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
        # id -> token (falls back to the unk token).
        _A = self.decoder.get(__lowerCAmelCase , self.unk_token )
        return result

    def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
        # Join sub-tokens back into a string, fusing "@@ " continuations.
        _A = ''' '''.join(__lowerCAmelCase )
        # make sure @@ tokens are concatenated
        _A = ''''''.join(string.split(__lowerCAmelCase ) )
        return string

    def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        # Write vocab.json (and merges.txt when bpe_ranks exist) into the
        # given directory, optionally prefixing the file names.
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
        _A = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            # Merges are written in rank order; a gap in indices means the
            # in-memory table is inconsistent.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    _A = token_index
                writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 2 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Configuration holder used by the MobileViT image-processor tests.

    Stores the image dimensions / processor flags the tests need and builds
    the kwargs dict for the processor under test.  The mangled original gave
    every ``__init__`` parameter the same name (a SyntaxError) and dropped all
    attribute assignments; the conventional tester signature is restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Defaults match the original hard-coded fallbacks.
        self.size = size if size is not None else {"shortest_edge": 20}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }

    # Backward-compatible alias for the mangled original method name.
    snake_case_ = prepare_image_processor_dict
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    # Test suite for MobileViTImageProcessor: property presence, size-dict
    # handling, and PIL / numpy / torch input batching.
    # NOTE(review): every helper below is named `snake_case_`, so later defs
    # shadow earlier ones at class-creation time, and results bound to the
    # throwaway local `_A` never persist on `self` (e.g. the tester built in
    # the first method is discarded) -- looks like mechanical renaming damage;
    # compare with the upstream test module before trusting behaviour.
    """simple docstring"""

    a__ : Tuple = MobileViTImageProcessor if is_vision_available() else None

    def snake_case_ ( self : Optional[int] ) -> Tuple:
        # Upstream: setUp -- builds the config tester.
        _A = MobileViTImageProcessingTester(self )

    @property
    def snake_case_ ( self : List[Any] ) -> str:
        # Upstream: image_processor_dict property.
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Dict ) -> Optional[Any]:
        # The processor should expose the expected config attributes.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_flip_channel_order''' ) )

    def snake_case_ ( self : int ) -> Optional[int]:
        # `from_dict` should honour both defaults and overrides for
        # size / crop_size.
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        # Intentionally empty placeholder in the upstream suite.
        pass

    def snake_case_ ( self : str ) -> Any:
        # PIL inputs: single image and batch both crop to (crop_h, crop_w).
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case_ ( self : Union[str, Any] ) -> List[str]:
        # Same checks for numpy-array inputs.
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case_ ( self : Optional[int] ) -> str:
        # Same checks for torch-tensor inputs.
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the parent of the node at ``position``."""
    # Fix: the original body referenced the undefined name `position` while the
    # parameter was called `_snake_case`; the parameter now carries the name.
    return (position - 1) // 2
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the left child of the node at ``position``."""
    # Fix: the original body referenced the undefined name `position` while the
    # parameter was called `_snake_case`; the parameter now carries the name.
    return (2 * position) + 1
def SCREAMING_SNAKE_CASE_ ( position :int ) -> int:
    """Return the heap index of the right child of the node at ``position``."""
    # Fix: the original body referenced the undefined name `position` while the
    # parameter was called `_snake_case`; the parameter now carries the name.
    return (2 * position) + 2
class lowerCamelCase__ ( Generic[T]):
    # Min-priority queue backed by a binary heap of (element, weight) pairs,
    # with a position map for O(log n) decrease-key.
    # NOTE(review): throughout this class results are bound to a throwaway
    # local `_A` instead of the attributes other methods read (`self.heap`,
    # `self.position_map`, `self.elements`), several defs duplicate the
    # parameter name `__lowerCAmelCase` (a syntax error), and all public
    # helpers share the name `snake_case_` -- mechanical renaming damage;
    # compare with the upstream implementation before trusting behaviour.
    """simple docstring"""

    def __init__( self : Optional[int] ) -> None:
        # Upstream: self.heap, self.position_map, self.elements.
        _A = []
        _A = {}
        _A = 0

    def __len__( self : str ) -> int:
        return self.elements

    def __repr__( self : Optional[int] ) -> str:
        return str(self.heap )

    def snake_case_ ( self : str ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        _A = self.elements
        self.elements += 1
        self._bubble_up(__lowerCAmelCase )

    def snake_case_ ( self : Tuple ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        _A , _A = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            _A , _A = self.heap[0]
            self._bubble_down(__lowerCAmelCase )
        return elem

    def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Update the weight of the given key
        _A = self.position_map[elem]
        _A = (elem, weight)
        if position > 0:
            _A = get_parent_position(__lowerCAmelCase )
            _A , _A = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__lowerCAmelCase )
            else:
                self._bubble_down(__lowerCAmelCase )
        else:
            self._bubble_down(__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        _A = self.position_map[elem]
        if curr_pos == 0:
            return None
        _A = get_parent_position(__lowerCAmelCase )
        _A , _A = self.heap[curr_pos]
        _A , _A = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
            return self._bubble_up(__lowerCAmelCase )
        return None

    def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        _A = self.position_map[elem]
        _A , _A = self.heap[curr_pos]
        _A = get_child_left_position(__lowerCAmelCase )
        _A = get_child_right_position(__lowerCAmelCase )
        # Both children present: sink towards the smaller one if it beats us.
        if child_left_position < self.elements and child_right_position < self.elements:
            _A , _A = self.heap[child_left_position]
            _A , _A = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        if child_left_position < self.elements:
            _A , _A = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        else:
            return None
        if child_right_position < self.elements:
            _A , _A = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        return None

    def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
        # Swap the nodes at the given positions
        _A = self.heap[nodea_pos][0]
        _A = self.heap[nodea_pos][0]
        _A , _A = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        _A = nodea_pos
        _A = nodea_pos
class lowerCamelCase__ ( Generic[T]):
    # Undirected weighted graph stored as nested adjacency dicts
    # (node -> neighbour -> weight).
    # NOTE(review): `add_node`/`add_edge` bind their results to the local `_A`
    # instead of `self.connections[...]`, both mutators share the name
    # `snake_case_`, and `add_edge` duplicates the parameter name
    # `__lowerCAmelCase` (a syntax error) -- mechanical renaming damage;
    # compare with the upstream implementation.
    """simple docstring"""

    def __init__( self : str ) -> None:
        # Upstream: self.connections = {}, self.nodes = 0.
        _A = {}
        _A = 0

    def __repr__( self : str ) -> str:
        return str(self.connections )

    def __len__( self : Dict ) -> int:
        return self.nodes

    def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            _A = {}
            self.nodes += 1

    def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(__lowerCAmelCase )
        self.add_node(__lowerCAmelCase )
        _A = weight
        _A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    # Prim's minimum-spanning-tree algorithm: returns (distance map, parent map).
    # NOTE(review): the body reads `graph`, `node`, `dist`, `parent` and
    # `priority_queue`, none of which are defined here (every result is bound
    # to the local `_A`, and the only parameter is `_snake_case`); the
    # annotation also names `GraphUndirectedWeighted`, which does not exist
    # under that name in this file.  Mechanical renaming damage -- compare
    # with the upstream `prims_algo` before use.
    _A = {node: maxsize for node in graph.connections}
    _A = {node: None for node in graph.connections}
    _A = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(_snake_case , _snake_case )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    _A = priority_queue.extract_min()
    _A = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            _A = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(_snake_case , dist[neighbour] )
            _A = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        _A = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                _A = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(_snake_case , dist[neighbour] )
                _A = node
    return dist, parent
| 2 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the public checkpoint -> config-URL map for CTRL.
UpperCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): both module constants share the name `UpperCAmelCase_`, so
# this mapping shadows the logger above -- mechanical renaming damage.
UpperCAmelCase_ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCamelCase__ ( _A):
    """Configuration for the Salesforce CTRL model.

    Holds the architecture hyper-parameters (vocabulary size, context length,
    embedding width, layer/head counts, dropouts) consumed by the CTRL model;
    serialization comes from the base configuration class.
    """

    # Fix: the three class attributes were all named `a__` (each shadowing the
    # previous one); restored to the attribute names the base config reads.
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=24_65_34 , n_positions=2_56 , n_embd=12_80 , dff=81_92 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        # Fix: the original signature repeated the parameter name
        # `__lowerCAmelCase` (a syntax error) and bound every value to a
        # throwaway local `_A`; the hyper-parameters are now stored on the
        # instance so the config actually carries them.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger plus BARTpho vocabulary metadata (file names, download URLs,
# max model input size).
# NOTE(review): every constant here is named `UpperCAmelCase_` (each shadows
# the previous one), while the tokenizer class below reads
# `VOCAB_FILES_NAMES` etc. -- mechanical renaming damage.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    # BARTpho (syllable) tokenizer: a SentencePiece BPE model whose vocabulary
    # is restricted to a monolingual Vietnamese word list loaded from dict.txt.
    # NOTE(review): many locals are bound to the throwaway `_A` instead of the
    # attributes later methods read (`self.fairseq_tokens_to_ids`,
    # `self.sp_model`, ...), several defs duplicate the parameter name
    # `__lowerCAmelCase` (a syntax error), and all helpers share the name
    # `snake_case_` -- mechanical renaming damage; compare with the upstream
    # `BartphoTokenizer` before trusting behaviour.
    """simple docstring"""

    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Tuple = ["input_ids", "attention_mask"]

    def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        _A = vocab_file
        _A = monolingual_vocab_file
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _A = {}
        _A = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                _A = cnt
                cnt += 1
        with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                _A = line.strip().split()[0]
                _A = len(self.fairseq_tokens_to_ids )
                if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                    _A = len(self.fairseq_tokens_to_ids )
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Any ) -> List[Any]:
        # Pickle support: drop the unpicklable SentencePiece processor and keep
        # its serialized proto instead.
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        # Rebuild the SentencePiece processor from the serialized proto.
        _A = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _A = {}
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        # Build model inputs with special tokens: <s> A </s> (</s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
        # Special-tokens mask: 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]

    def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        # Token-type ids are all zeros for this model.
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        # Vocab size == number of entries kept from the monolingual dict.
        return len(self.fairseq_ids_to_tokens )

    def snake_case_ ( self : Dict ) -> Optional[Any]:
        # Full token -> id map, including added tokens.
        _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
        # Tokenize a string with SentencePiece.
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )

    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
        # token -> id via the reduced fairseq vocab; unknown tokens map to unk.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
        # id -> token.
        return self.fairseq_ids_to_tokens[index]

    def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        # Join sub-tokens back into a string, replacing the SentencePiece
        # underline marker with a space.
        _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string

    def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        # Write the SentencePiece model and the monolingual dict into
        # `save_directory`; returns the two output paths.
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__lowerCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 2 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( a , b ):
    """Pairwise squared Euclidean distances between rows of ``a`` and ``b``.

    Returns a matrix ``d`` with ``d[i, j] = ||a[i] - b[j]||**2``, computed via
    the expansion ``||x - y||**2 = ||x||**2 - 2*x.y + ||y||**2``.
    """
    # Fix: the original declared both parameters with the same name (a syntax
    # error) and referenced undefined intermediates; reconstructed with the
    # standard row-vs-row distance formulation.
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def SCREAMING_SNAKE_CASE_ ( x , clusters ):
    """Map each RGB pixel in ``x`` to the index of its nearest cluster centre.

    Flattens ``x`` to (num_pixels, 3) and returns a 1-D array of cluster
    indices (argmin over the pairwise squared distances).
    """
    # Fix: the original declared both parameters with the same name (a syntax
    # error) and read the undefined locals `x`/`d`.
    # NOTE(review): the sibling distance helper is defined above under the
    # name `SCREAMING_SNAKE_CASE_`, not `squared_euclidean_distance`;
    # the call below mirrors the original reference -- verify upstream.
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class lowerCamelCase__ ( _A):
    # Image processor: resize -> normalize to [-1, 1] -> optional colour
    # quantization against `clusters`, returning a BatchFeature keyed by
    # "input_ids".
    # NOTE(review): `__init__` binds every value to the throwaway local `_A`
    # instead of `self.*`, and the methods duplicate the parameter name
    # `__lowerCAmelCase` (a syntax error) -- mechanical renaming damage;
    # compare with the upstream implementation before trusting behaviour.
    """simple docstring"""

    a__ : Union[str, Any] = ["pixel_values"]

    def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> None:
        super().__init__(**__lowerCAmelCase )
        _A = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
        _A = get_size_dict(__lowerCAmelCase )
        _A = np.array(__lowerCAmelCase ) if clusters is not None else None
        _A = do_resize
        _A = size
        _A = resample
        _A = do_normalize
        _A = do_color_quantize

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[Any] , ) -> np.ndarray:
        # Resize one image to the (height, width) given in `size`.
        _A = get_size_dict(__lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        return resize(
            __lowerCAmelCase , size=(size['''height'''], size['''width''']) , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
        # Scale pixel values from [0, 255] to [-1, 1] (divide by 127.5, minus 1).
        _A = rescale(image=__lowerCAmelCase , scale=1 / 127.5 , data_format=__lowerCAmelCase )
        _A = image - 1
        return image

    def snake_case_ ( self : int , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__lowerCAmelCase : int , ) -> PIL.Image.Image:
        # Full preprocessing pipeline; per-call arguments override the defaults
        # captured at construction time.
        _A = do_resize if do_resize is not None else self.do_resize
        _A = size if size is not None else self.size
        _A = get_size_dict(__lowerCAmelCase )
        _A = resample if resample is not None else self.resample
        _A = do_normalize if do_normalize is not None else self.do_normalize
        _A = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        _A = clusters if clusters is not None else self.clusters
        _A = np.array(__lowerCAmelCase )
        _A = make_list_of_images(__lowerCAmelCase )
        if not valid_images(__lowerCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `(do_resize and size is None) or resample is None` -- verify intent.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
        # All transformations expect numpy arrays.
        _A = [to_numpy_array(__lowerCAmelCase ) for image in images]
        if do_resize:
            _A = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
        if do_normalize:
            _A = [self.normalize(image=__lowerCAmelCase ) for image in images]
        if do_color_quantize:
            _A = [to_channel_dimension_format(__lowerCAmelCase , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            _A = np.array(__lowerCAmelCase )
            _A = color_quantize(__lowerCAmelCase , __lowerCAmelCase ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            _A = images.shape[0]
            _A = images.reshape(__lowerCAmelCase , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            _A = list(__lowerCAmelCase )
        else:
            _A = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
        _A = {'''input_ids''': images}
        return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
| 2 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( graph :dict , start :str ) -> set[str]:
    """Iterative depth-first search.

    Returns the set of vertices reachable from ``start`` in the adjacency-list
    ``graph`` (node -> list of neighbour names).
    """
    # Fix: the original declared both parameters as `_snake_case` (a syntax
    # error) and read undefined locals; it also seeded `explored` with
    # `set(start)`, which splits a multi-character node name into characters --
    # start with an empty set (start is added when it is popped).
    explored , stack = set() , [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
# Adjacency-list sample graph for the DFS above ("D" carries a self-loop in
# the upstream sample) plus the doctest entry point.
# NOTE(review): the entry point references `depth_first_search` and `G`,
# neither of which exists under those names in this file (the function is
# `SCREAMING_SNAKE_CASE_`, the dict is `UpperCAmelCase_`) -- renaming damage.
UpperCAmelCase_ = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(depth_first_search(G, """A"""))
| 2 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE_ ( key :str , default=False ):
    """Read a boolean flag named ``key`` from the environment.

    Returns ``default`` when the variable is unset; otherwise parses the value
    with ``strtobool`` (yes/no, true/false, on/off, 1/0) and raises
    ``ValueError`` for anything else.
    """
    # Fix: the original declared both parameters as `_snake_case` (a syntax
    # error) and read the undefined names `key` and `_value`.
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
UpperCAmelCase_ = parse_flag_from_env("""RUN_SLOW""", default=False)
# Decorator helpers mirroring accelerate's skip-condition decorators
# (skip / slow / cpu-only / gpu / xpu / mps / HF suite / bitsandbytes / tpu /
# single- and multi-device / safetensors / DeepSpeed / torch >= 1.12).
# NOTE(review): every helper is named `SCREAMING_SNAKE_CASE_`, so at module
# level each def shadows the previous one and only the last survives.  The
# "slow" variant also reads `_run_slow_tests`, which is not defined under
# that name here.  Mechanical renaming damage -- compare with upstream
# `accelerate.test_utils.testing`.
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> str:
    # Unconditional skip.
    return unittest.skip('''Test was skipped''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] ) -> Union[str, Any]:
    # Run only when slow tests are enabled (RUN_SLOW).
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Tuple:
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> List[str]:
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Tuple:
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] ) -> Optional[Any]:
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Optional[Any]:
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> Union[str, Any]:
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Union[str, Any]:
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Any:
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Optional[Any]:
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> List[Any]:
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Optional[Any]:
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Dict:
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> str:
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> Union[str, Any]:
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( test_case=None , version=None ):
    """Skip the decorated test unless torch >= ``version``.

    Usable with keyword-only configuration (``@decorator(version=...)``):
    when called without a test case it returns a ``partial`` of itself with
    the version pre-bound.
    """
    # Fix: the original declared both parameters as `_snake_case` (a syntax
    # error) and read the undefined names `test_case` and `version`.
    # NOTE(review): every helper in this module shares the name
    # `SCREAMING_SNAKE_CASE_`, so the deferred partial resolves to whichever
    # definition is current -- verify against upstream before relying on the
    # bare-decorator form.
    if test_case is None:
        return partial(SCREAMING_SNAKE_CASE_ , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , F'''test requires torch version >= {version}''' )(test_case )
# Tracker-availability guards (tensorboard / wandb / comet_ml); the same
# name-shadowing caveat as the helpers above applies.
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Dict:
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Optional[Any]:
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> List[Any]:
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(_snake_case )
# True when at least one of wandb/tensorboard is installed and comet_ml is not.
UpperCAmelCase_ = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] ) -> List[Any]:
    # Skip unless a usable tracker is available.
    # NOTE(review): reads `_atleast_one_tracker_available`, which is not
    # defined under that name here (the constant above is `UpperCAmelCase_`)
    # -- renaming damage.
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(_snake_case )
class lowerCamelCase__ ( unittest.TestCase):
    # TestCase backed by a class-level temporary directory, optionally cleared
    # before every test.
    # NOTE(review): setUpClass binds mkdtemp() to the local `_A` instead of
    # `cls.tmpdir`, the three hooks all share the name `snake_case_`
    # (upstream: setUpClass / tearDownClass / setUp, plus the class attribute
    # `clear_on_setup` instead of `a__`), and the last line removes the
    # undefined name `__lowerCAmelCase` -- mechanical renaming damage.
    """simple docstring"""

    a__ : Dict = True

    @classmethod
    def snake_case_ ( cls : str ) -> Any:
        # Create the shared temporary directory once per class.
        _A = tempfile.mkdtemp()

    @classmethod
    def snake_case_ ( cls : Dict ) -> int:
        # Remove the shared temporary directory after the class is done.
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def snake_case_ ( self : str ) -> Optional[Any]:
        # Clear the directory's contents before each test when enabled.
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(__lowerCAmelCase )
class lowerCamelCase__ ( unittest.TestCase):
    # TestCase that resets the accelerate state singletons after each test so
    # tests do not leak configuration into each other.
    """simple docstring"""

    def snake_case_ ( self : Union[str, Any] ) -> str:
        # NOTE(review): upstream this hook is `tearDown`; under the mangled
        # name unittest will never call it automatically.
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCamelCase__ ( unittest.TestCase):
    # TestCase helper that starts a batch of mock patches and registers their
    # teardown as cleanups.
    """simple docstring"""

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Union[mock.Mock, List[mock.Mock]] ) -> List[str]:
        # NOTE(review): the normalized list is bound to the local `_A` but the
        # loop iterates `self.mocks`; upstream stores the list on `self` --
        # mechanical renaming damage.
        _A = mocks if isinstance(__lowerCAmelCase , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> Optional[int]:
    # Check that a tensor holds the same value on every process: gather across
    # processes and compare each slice against the local value.
    # NOTE(review): the body reads `tensor`, `state` and `tensors`, none of
    # which are defined here (every result goes to the local `_A`) --
    # mechanical renaming damage; compare with upstream
    # `are_the_same_tensors`.
    _A = AcceleratorState()
    _A = tensor[None].clone().to(state.device )
    _A = gather(_snake_case ).cpu()
    _A = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , _snake_case ):
            return False
    return True
class lowerCamelCase__ :
    """Result of a finished subprocess.

    Carries the process return code plus the stdout/stderr lines captured by
    the streaming helpers below.
    """

    def __init__( self , returncode , stdout , stderr ):
        # Fix: the original declared three parameters with the same name (a
        # syntax error) and bound them to a throwaway local instead of `self`.
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def SCREAMING_SNAKE_CASE_ ( stream , callback ):
    """Forward every line from an asyncio ``stream`` to ``callback`` until EOF.

    ``stream.readline()`` returns an empty ``bytes`` object at end-of-stream,
    which terminates the loop.
    """
    # Fix: the original declared both parameters as `_snake_case` (a syntax
    # error) and read the undefined name `line`.
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict=None , _snake_case :Optional[Any]=None , _snake_case :Tuple=None , _snake_case :Optional[int]=False , _snake_case :Optional[Any]=False ) -> _RunOutput:
    # Spawn a command, stream its stdout/stderr line-by-line through `tee`
    # (echoing to the parent's streams unless quiet), and return a _RunOutput.
    # NOTE(review): the signature repeats the parameter name `_snake_case`
    # (invalid Python) and the body reads `cmd`, `p`, `line`, `sink`, `quiet`
    # and `label`, none of which are defined here -- mechanical renaming
    # damage; compare with upstream
    # `accelerate.test_utils.testing._stream_subprocess`.
    if echo:
        print('''\nRunning: ''' , ''' '''.join(_snake_case ) )
    _A = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=_snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_snake_case , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    _A = []
    _A = []
    def tee(_snake_case :Dict , _snake_case :int , _snake_case :Union[str, Any] , _snake_case :Optional[Any]="" ):
        # Decode a raw line, collect it, and optionally echo it with a label.
        _A = line.decode('''utf-8''' ).rstrip()
        sink.append(_snake_case )
        if not quiet:
            print(_snake_case , _snake_case , file=_snake_case )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda _snake_case : tee(_snake_case , _snake_case , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda _snake_case : tee(_snake_case , _snake_case , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=_snake_case , )
    return _RunOutput(await p.wait() , _snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :int=None , _snake_case :Tuple=None , _snake_case :Union[str, Any]=180 , _snake_case :str=False , _snake_case :Dict=True ) -> _RunOutput:
    # Synchronous wrapper around the async streamer: run the command to
    # completion and raise RuntimeError (with combined stderr) on non-zero exit.
    # NOTE(review): duplicate `_snake_case` parameters (invalid Python) and the
    # body reads the undefined names `cmd_str` and `result` -- mechanical
    # renaming damage; compare with upstream `execute_subprocess_async`.
    _A = asyncio.get_event_loop()
    _A = loop.run_until_complete(
        _stream_subprocess(_snake_case , env=_snake_case , stdin=_snake_case , timeout=_snake_case , quiet=_snake_case , echo=_snake_case ) )
    _A = ''' '''.join(_snake_case )
    if result.returncode > 0:
        _A = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class lowerCamelCase__ ( _A):
    """Marker exception raised when a subprocess command fails (originally `SubprocessCallException`)."""
    # NOTE(review): the helper below still raises the original name
    # `SubprocessCallException`, which no longer matches this mangled class
    # name — confirm the intended reference.
    pass
def SCREAMING_SNAKE_CASE_(command, return_stdout=False):
    """Run `command` with `subprocess.check_output`, capturing stderr into stdout.

    Args:
        command: argv-style list for the subprocess.
        return_stdout: when True, return the (utf-8 decoded) output.

    Raises a SubprocessCallException carrying the captured output on failure.
    """
    # BUG FIX: both parameters were mangled to the duplicate name `_snake_case`
    # (a SyntaxError); names restored from how the body uses them.
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        # NOTE(review): `SubprocessCallException` is the intended exception type,
        # but the class above was renamed to `lowerCamelCase__` — confirm the
        # reference resolves at runtime.
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 2 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCAmelCase_ = """\
Text data.
Second line of data."""
UpperCAmelCase_ = """file"""
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] ) -> Optional[int]:
    """Session fixture: write the sample content zstd-compressed into a tmp dir and return its path."""
    # `_snake_case` is pytest's `tmp_path_factory` fixture, yet the body refers
    # to `tmp_path_factory` directly; `FILE_PATH`, the bytes() argument, the
    # zstd.open() argument and the returned `path` are likewise un-mangled
    # leftovers of the original names.  NOTE(review): confirm against upstream
    # before un-mangling — as written these references do not resolve.
    _A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    _A = bytes(_snake_case , '''utf-8''' )
    with zstd.open(_snake_case , '''wb''' ) as f:
        f.write(_snake_case )
    return path
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple ) -> Any:
    """Fixture: write the sample content into the mock `tmpfs` filesystem and return its path."""
    # NOTE(review): `_snake_case` is the `tmpfs` fixture (the body uses `tmpfs`
    # directly), while the join argument / written content should be the module
    # constants FILE_PATH / FILE_CONTENT, and `FILE_PATH` in the return is
    # undefined after the constant rename — confirm against upstream.
    with open(os.path.join(tmpfs.local_root_dir , _snake_case ) , '''w''' ) as f:
        f.write(_snake_case )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :str , _snake_case :str , _snake_case :List[str] , _snake_case :int , _snake_case :List[Any] ) -> Optional[int]:
    """cached_path with extract_compressed_file=True must reproduce the original text content."""
    # NOTE(review): every parameter was mangled to the duplicate name
    # `_snake_case` (a SyntaxError); upstream the fixtures are
    # (compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file),
    # and the body's `gz_file`/`xz_file`/`zstd_path`/`tmp_path` and the
    # open()/DownloadConfig()/cached_path() arguments rely on those
    # pre-mangling names — confirm before un-mangling.
    _A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    _A = input_paths[compression_format]
    _A = tmp_path / '''cache'''
    _A = DownloadConfig(cache_dir=_snake_case , extract_compressed_file=_snake_case )
    _A = cached_path(_snake_case , download_config=_snake_case )
    with open(_snake_case ) as f:
        _A = f.read()
    with open(_snake_case ) as f:
        _A = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :Tuple , _snake_case :Dict , _snake_case :Union[str, Any] , _snake_case :List[Any] ) -> str:
    """cached_path must place extracted archives under the configured extraction directory."""
    # NOTE(review): all five parameters were mangled to the duplicate name
    # `_snake_case` (a SyntaxError); upstream they are (xz_file, tmp_path,
    # monkeypatch, default_extracted, default_cache_dir).  The body's
    # `tmp_path`/`monkeypatch`/`custom_cache_dir`/`custom_extracted_dir`/
    # `custom_extracted_path`/`xz_file`/`expected` references assume the
    # original local names — confirm before un-mangling.
    _A = '''custom_cache'''
    _A = '''custom_extracted_dir'''
    _A = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        _A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _snake_case )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_snake_case ) )
        _A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    _A = xz_file
    _A = (
        DownloadConfig(extract_compressed_file=_snake_case )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_snake_case )
    )
    _A = cached_path(_snake_case , download_config=_snake_case )
    assert Path(_snake_case ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE_(text_file):
    """cached_path returns local files unchanged, for absolute and relative paths alike."""
    # BUG FIX: the parameter was mangled to `_snake_case` while the assertions
    # already compare against `text_file` (a NameError), and pytest injects the
    # fixture by parameter name — restored to `text_file`; the cached_path
    # arguments are the freshly built path strings.
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file
def SCREAMING_SNAKE_CASE_(tmp_path):
    """cached_path raises FileNotFoundError for missing local files (absolute and relative)."""
    # BUG FIX: the parameter was mangled to `_snake_case` while the body uses
    # `tmp_path` directly (a NameError, and pytest injects fixtures by name);
    # the expected exception and the cached_path argument had also been mangled
    # to `_snake_case` — restored to FileNotFoundError / the missing-file path.
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def SCREAMING_SNAKE_CASE_(tmpfs_file):
    """get_from_cache resolves fsspec (tmp://) URLs and caches the file content."""
    # BUG FIX: the parameter was mangled to `_snake_case` while the f-string
    # already interpolates `tmpfs_file`; the open() argument is the cached path
    # returned by get_from_cache, not the mangled placeholder.
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    # NOTE(review): FILE_CONTENT is the module constant that was renamed to
    # `UpperCAmelCase_` near the top of this file — confirm the name resolves.
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def SCREAMING_SNAKE_CASE_():
    """In offline mode, cached_path must fail fast for remote URLs."""
    # BUG FIX: the patch value and the expected exception were mangled to the
    # undefined module-level name `_snake_case`; offline mode is forced on
    # (True) and the guard raises OfflineModeIsEnabled, which is imported at
    # the top of this file.
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Offline mode: http_get / http_head raise instead of touching the network."""
    # BUG FIX: the patch value, the parameter name (pytest injects the
    # `tmp_path_factory` fixture by name), the expected exception and the
    # `temp_file` argument were all mangled to `_snake_case`; restored so the
    # test exercises the OfflineModeIsEnabled guard (imported at file top).
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Offline mode: ftp_get / ftp_head raise instead of touching the network."""
    # BUG FIX: the patch value, the fixture parameter name, the expected
    # exception and the `temp_file` argument were all mangled to `_snake_case`;
    # restored (OfflineModeIsEnabled is imported at the top of this file).
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def SCREAMING_SNAKE_CASE_(tmp_path_factory):
    """Offline mode: fsspec_get / fsspec_head raise instead of touching remote storage."""
    # BUG FIX: the patch value, the fixture parameter name, the expected
    # exception and the `temp_file` argument were all mangled to `_snake_case`;
    # restored (OfflineModeIsEnabled is imported at the top of this file).
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 2 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# Map of pretrained XLNet checkpoint names to their hosted config files.
# NOTE(review): this constant reuses the logger's identifier and therefore
# shadows it — the class below reads the bare name `logger`, which no longer
# resolves after the rename; confirm the intended distinct names.
UpperCAmelCase_ = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
    """Configuration for XLNet models (e.g. xlnet-base-cased / xlnet-large-cased).

    Stores the hyper-parameters needed to instantiate an XLNet model; the
    serialization and attribute-mapping behaviour comes from the
    PretrainedConfig-style base class.
    """

    # BUG FIX: the three class attributes below were all mangled to the single
    # name `a__`, so only the last assignment survived.  Their original names
    # are restored: `model_type` is read back by the two properties below via
    # `self.model_type`; `attribute_map` provides the backward-compatible
    # aliases the base class resolves.
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config.

        BUG FIX: every parameter had been mangled to the duplicate name
        `__lowerCAmelCase` — a SyntaxError in Python — and every attribute
        assignment targeted the throwaway local `_A`.  Names are restored from
        the original defaults and from how the body consumes them.
        """
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # d_model must split evenly across the attention heads.
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        # Legacy alias: `use_cache` used to control what is now `use_mems_eval`.
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """XLNet has no fixed sequence-length limit; always report -1."""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # BUG FIX: the getter had been renamed to `snake_case_`, leaving the
        # `@max_position_embeddings.setter` decorator pointing at an undefined
        # name (NameError at class-creation time); the property name is restored.
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 2 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE_(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times; return the learning rate observed before each step."""
    # BUG FIX: both parameters were mangled to the duplicate name `_snake_case`
    # (a SyntaxError) and the local list was referenced as `lrs` while being
    # assigned to `_A`; names restored.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def SCREAMING_SNAKE_CASE_(scheduler, num_steps=10):
    """Like the unwrap helper above, but round-trip the scheduler state through
    torch.save/torch.load halfway through, verifying schedules survive serialization."""
    # BUG FIX: both parameters were mangled to the duplicate name `_snake_case`
    # (a SyntaxError), and the temp-file path / loaded state dict were assigned
    # to the throwaway `_A` while being used under their real names; restored.
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # Serialize and immediately restore: the remaining LRs must be
            # unaffected by the round trip.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Checks that AdamW / Adafactor drive a tiny least-squares problem to its target."""
    # NOTE(review): the two optimizer tests below share the mangled name
    # `snake_case_` (the second shadows the first), the helper's three
    # parameters are all named `__lowerCAmelCase` (a duplicate-argument
    # SyntaxError), and several argument values (`requires_grad=`, `betaa=`,
    # `relative_step=` ...) were mangled to the same placeholder — confirm
    # against the upstream transformers optimization tests before un-mangling.
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ) -> Any:
        # Element-wise almost-equality over two equally long lists
        # (originally `assertListAlmostEqual(list1, list2, tol)`).
        self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
        for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
            self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase )
    def snake_case_ ( self : int ) -> Union[str, Any]:
        # Originally `test_adam_w`: 100 AdamW steps on w -> target.
        _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
        _A = torch.tensor([0.4, 0.2, -0.5] )
        _A = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(1_00 ):
            _A = criterion(__lowerCAmelCase , __lowerCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def snake_case_ ( self : int ) -> Union[str, Any]:
        # Originally `test_adafactor`: 1000 Adafactor steps on w -> target.
        _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCAmelCase )
        _A = torch.tensor([0.4, 0.2, -0.5] )
        _A = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _A = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCAmelCase , weight_decay=0.0 , relative_step=__lowerCAmelCase , scale_parameter=__lowerCAmelCase , warmup_init=__lowerCAmelCase , )
        for _ in range(10_00 ):
            _A = criterion(__lowerCAmelCase , __lowerCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Checks every LR schedule factory against its expected per-step learning rates,
    including a save/reload round trip."""
    # NOTE(review): the class attributes were all mangled to `a__` (upstream:
    # `m`, `optimizer`, `num_steps`), the helper's parameters are duplicated
    # `__lowerCAmelCase` names (a SyntaxError), and the loop body references
    # locals (`common_kwargs`, `scheds`, `scheduler`, `expected_learning_rates`,
    # `lrs_1`, `lrs_2`) whose assignments were rewritten to `_A` — confirm
    # against the upstream transformers tests before un-mangling.
    a__ : Optional[Any] = nn.Linear(50 , 50) if is_torch_available() else None
    a__ : Dict = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None
    a__ : List[Any] = 10
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=None ) -> List[Any]:
        # Element-wise almost-equality helper (originally `assertListAlmostEqual`).
        self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
        for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ):
            self.assertAlmostEqual(__lowerCAmelCase , __lowerCAmelCase , delta=__lowerCAmelCase , msg=__lowerCAmelCase )
    def snake_case_ ( self : Any ) -> str:
        _A = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        _A = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'''num_warmup_steps''': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, '''num_cycles''': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'''num_warmup_steps''': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            _A , _A = data
            _A = scheduler_func(self.optimizer , **__lowerCAmelCase )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            _A = unwrap_schedule(__lowerCAmelCase , self.num_steps )
            self.assertListAlmostEqual(
                __lowerCAmelCase , __lowerCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            _A = scheduler_func(self.optimizer , **__lowerCAmelCase )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(__lowerCAmelCase ) # wrap to test picklability of the schedule
            _A = unwrap_and_save_reload_schedule(__lowerCAmelCase , self.num_steps )
            self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class lowerCamelCase__ :
    """Picklable wrapper around a schedule's lr_lambda callables.

    Wrapping each lr_lambda in an instance of this class (instead of a bare
    lambda) keeps the schedule serializable, which the save/reload scheduler
    test relies on.
    """

    def __init__(self, fn):
        # The wrapped callable.
        self.fn = fn

    def __call__(self, *args, **kwargs):
        # Delegate straight through to the wrapped callable.
        return self.fn(*args, **kwargs)

    @classmethod
    def snake_case_(cls, scheduler):
        """Wrap every `lr_lambda` of `scheduler` with this class, in place."""
        # BUG FIX: the original assigned the wrapped list to a throwaway local
        # (`_A = ...`) and referenced the undefined name `scheduler` while the
        # parameter was mangled; the wrapped callables must be written back to
        # `scheduler.lr_lambdas` for the wrapping to take effect.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 2 |
def SCREAMING_SNAKE_CASE_(data: bytes) -> str:
    """Encode `data` as an uppercase Base16 (hex) string per RFC 3548."""
    # BUG FIX: the original called hex() on the whole bytes object instead of
    # on each byte (a TypeError); encode byte by byte, zero-padding to two
    # hex digits each.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
def SCREAMING_SNAKE_CASE_(data: str) -> bytes:
    """Decode an uppercase Base16 (hex) string back into the original bytes.

    Raises:
        ValueError: when the input has an odd number of digits or contains
            characters outside the uppercase Base16 alphabet (RFC 3548 §6).
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # BUG FIX: the comprehension referenced the undefined name `data` while the
    # parameter was mangled to `_snake_case`; the parameter is restored to `data`.
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 2 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Generate a 1024-bit RSA key pair and write rsa_pubkey.txt / rsa_privkey.txt (originally `main`)."""
    print('''Making key files...''' )
    # NOTE(review): `make_key_files` is the original name of the writer function
    # below, which was renamed to `SCREAMING_SNAKE_CASE_` — this call will
    # NameError as written; confirm the intended reference.
    make_key_files('''rsa''' , 1_024 )
    print('''Key files generation successful.''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA (public_key, private_key) pair of `key_size` bits (originally `generate_key`)."""
    # NOTE(review): the locals `p`, `q`, `n`, `e`, `d`, `key_size` are the
    # original names, but every assignment target was mangled to `_A` and some
    # argument references to `_snake_case` — e.g. the gcd() test should check
    # the candidate exponent `e`, not the key size, and find_mod_inverse()
    # should receive `e`.  Confirm against the upstream script before running.
    print('''Generating prime p...''' )
    _A = rabinMiller.generate_large_prime(_snake_case )
    print('''Generating prime q...''' )
    _A = rabinMiller.generate_large_prime(_snake_case )
    # RSA modulus n = p * q.
    _A = p * q
    print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
    while True:
        # Draw candidates until one is coprime with Euler's totient.
        _A = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(_snake_case , (p - 1) * (q - 1) ) == 1:
            break
    print('''Calculating d that is mod inverse of e...''' )
    _A = cryptoMath.find_mod_inverse(_snake_case , (p - 1) * (q - 1) )
    _A = (n, e)
    _A = (n, d)
    return (public_key, private_key)
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> None:
    """Write `{name}_pubkey.txt` / `{name}_privkey.txt`, refusing to overwrite existing files
    (originally `make_key_files(name, key_size)`)."""
    # NOTE(review): both parameters were mangled to the duplicate name
    # `_snake_case` (a SyntaxError), and `generate_key` below is the original
    # name of the key-pair function above (renamed to SCREAMING_SNAKE_CASE_) —
    # confirm the intended references before running.
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print('''\nWARNING:''' )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    _A , _A = generate_key(_snake_case )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , '''w''' ) as out_file:
        out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , '''w''' ) as out_file:
        out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
    # NOTE(review): `main` is the original name of the entry point above, which
    # was renamed to `SCREAMING_SNAKE_CASE_` — this call will NameError; confirm.
    main()
| 2 |
def SCREAMING_SNAKE_CASE_(series: list) -> bool:
    """Return True when `series` is an arithmetic progression (constant difference).

    Raises:
        ValueError: when the input is not a list or is empty.
    """
    # BUG FIX: the original tested `isinstance(series, series)` — the second
    # argument had been mangled away — which raises TypeError at runtime; the
    # check is against `list`.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def SCREAMING_SNAKE_CASE_(series: list) -> float:
    """Return the arithmetic mean of `series`.

    Raises:
        ValueError: when the input is not a list or is empty.
    """
    # BUG FIX: the original tested `isinstance(series, series)` (mangled second
    # argument, a runtime TypeError); the check is against `list`.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # Built-in sum() replaces the original manual accumulation loop.
    return sum(series) / len(series)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 2 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase):
    """SamProcessor tests (PyTorch backend): save/load round trip and post_process_masks shapes."""
    # NOTE(review): all helper methods share the mangled name `snake_case_`, and
    # many locals (`image_processor`, `processor`, `image_inputs`, `masks`, ...)
    # are referenced under their original names while their assignments were
    # rewritten to `_A`, with some call arguments mangled to `__lowerCAmelCase`
    # — confirm against the upstream transformers SAM processor tests.
    def snake_case_ ( self : str ) -> Any:
        # setUp: persist a fresh SamProcessor into a temp dir.
        _A = tempfile.mkdtemp()
        _A = SamImageProcessor()
        _A = SamProcessor(__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case_ ( self : Any , **__lowerCAmelCase : Any ) -> Dict:
        # Helper: reload the image processor from the temp dir with overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
    def snake_case_ ( self : Tuple ) -> Optional[int]:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def snake_case_ ( self : Any ) -> Dict:
        # Helper: one random uint8 HWC PIL image.
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case_ ( self : Union[str, Any] ) -> int:
        # Save/load round trip must preserve the image-processor config.
        _A = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        _A = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def snake_case_ ( self : str ) -> Tuple:
        # Processor output must match the bare image processor output.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        _A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_torch
    def snake_case_ ( self : Dict ) -> Any:
        # post_process_masks must upscale to the original image size for torch
        # tensors, python lists and numpy arrays, and reject malformed sizes.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = [torch.ones((1, 3, 5, 5) )]
        _A = [[17_64, 26_46]]
        _A = [[6_83, 10_24]]
        _A = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        _A = processor.post_process_masks(
            __lowerCAmelCase , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        _A = [np.ones((1, 3, 5, 5) )]
        _A = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        _A = [[1, 0], [0, 1]]
        with self.assertRaises(__lowerCAmelCase ):
            _A = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """SamProcessor tests (TensorFlow backend): save/load round trip and post_process_masks shapes."""
    # NOTE(review): same mangling pattern as the torch class above — shared
    # `snake_case_` method names, `_A` assignment targets used under their
    # original local names, and `__lowerCAmelCase` argument placeholders;
    # confirm against the upstream transformers SAM processor tests.
    def snake_case_ ( self : int ) -> Any:
        # setUp: persist a fresh SamProcessor into a temp dir.
        _A = tempfile.mkdtemp()
        _A = SamImageProcessor()
        _A = SamProcessor(__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case_ ( self : int , **__lowerCAmelCase : Optional[Any] ) -> int:
        # Helper: reload the image processor from the temp dir with overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
    def snake_case_ ( self : List[str] ) -> Tuple:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def snake_case_ ( self : List[Any] ) -> int:
        # Helper: one random uint8 HWC PIL image.
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case_ ( self : str ) -> Tuple:
        # Save/load round trip must preserve the image-processor config.
        _A = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        _A = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
    def snake_case_ ( self : Any ) -> Optional[int]:
        # Processor output must match the bare image processor output.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        _A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    @require_tf
    def snake_case_ ( self : Optional[int] ) -> Any:
        # post_process_masks must upscale to the original image size for tf
        # tensors, python lists and numpy arrays, and reject malformed sizes.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = [tf.ones((1, 3, 5, 5) )]
        _A = [[17_64, 26_46]]
        _A = [[6_83, 10_24]]
        _A = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        _A = processor.post_process_masks(
            __lowerCAmelCase , tf.convert_to_tensor(__lowerCAmelCase ) , tf.convert_to_tensor(__lowerCAmelCase ) , return_tensors='''tf''' , )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        _A = [np.ones((1, 3, 5, 5) )]
        _A = processor.post_process_masks(
            __lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        _A = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            _A = processor.post_process_masks(
                __lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class lowerCamelCase__ ( unittest.TestCase):
    """SamProcessor cross-framework tests: torch and TF paths must produce identical arrays."""
    # NOTE(review): same mangling pattern as the two classes above — shared
    # `snake_case_` method names, `_A` assignment targets used under their
    # original local names, and `__lowerCAmelCase` argument placeholders;
    # confirm against the upstream transformers SAM processor tests.
    def snake_case_ ( self : Optional[int] ) -> str:
        # setUp: persist a fresh SamProcessor into a temp dir.
        _A = tempfile.mkdtemp()
        _A = SamImageProcessor()
        _A = SamProcessor(__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case_ ( self : List[str] , **__lowerCAmelCase : Union[str, Any] ) -> Any:
        # Helper: reload the image processor from the temp dir with overrides.
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
    def snake_case_ ( self : List[str] ) -> Optional[Any]:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def snake_case_ ( self : str ) -> Union[str, Any]:
        # Helper: one random uint8 HWC PIL image.
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def snake_case_ ( self : Any ) -> str:
        # post_process_masks must agree between the tf and pt code paths.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        _A = [tf.convert_to_tensor(__lowerCAmelCase )]
        _A = [torch.tensor(__lowerCAmelCase )]
        _A = [[17_64, 26_46]]
        _A = [[6_83, 10_24]]
        _A = processor.post_process_masks(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='''tf''' )
        _A = processor.post_process_masks(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def snake_case_ ( self : Optional[int] ) -> List[Any]:
        # Preprocessing must agree between the tf and pt code paths, and between
        # the processor and the bare image processor.
        _A = self.get_image_processor()
        _A = SamProcessor(image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''pt''' )['''pixel_values'''].numpy()
        _A = processor(images=__lowerCAmelCase , return_tensors='''pt''' )['''pixel_values'''].numpy()
        _A = image_processor(__lowerCAmelCase , return_tensors='''tf''' )['''pixel_values'''].numpy()
        _A = processor(images=__lowerCAmelCase , return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
| 2 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE_(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit Quantum Fourier Transform circuit, simulate it with 10000 shots,
    and return the measurement counts.

    Raises:
        TypeError: when the qubit count is a string.
        ValueError: when it is non-positive, non-integral, or larger than 10.
    """
    # BUG FIX: the original validated `isinstance(number_of_qubits, number_of_qubits)`
    # (a TypeError at runtime) and passed mangled `_snake_case` placeholders to
    # the gate/measure/execute calls; arguments restored from the QFT structure.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the current qubit, then controlled phase rotations
        # from each lower qubit onto it.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        # Reverse the qubit order with swaps to finish the QFT.
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: simulate a 3-qubit QFT and print the measurement counts.
    # NOTE(review): `quantum_fourier_transform` is the original name of the
    # function above (renamed to SCREAMING_SNAKE_CASE_) — this reference will
    # NameError as written; confirm.
    print(
        f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
    )
| 2 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def SCREAMING_SNAKE_CASE_(word: str):
    """Return 1 when every character of `word` is a CJK ideograph, else 0."""
    # BUG FIX: the loop iterated the undefined name `word` while the parameter
    # was mangled to `_snake_case`, and ord() was applied to the whole word
    # instead of each character; names restored.
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        # NOTE(review): `_is_chinese_char` is the original name of the code-point
        # check above, which was renamed to SCREAMING_SNAKE_CASE_ — confirm the
        # reference resolves.
        if not _is_chinese_char(cp):
            return 0
    return 1
def SCREAMING_SNAKE_CASE_(tokens):
    """Collect (as a list) the multi-character, fully-Chinese tokens found in `tokens`."""
    # BUG FIX: the loop iterated the undefined name `tokens` while the parameter
    # was mangled to `_snake_case`, and the per-token length/containment checks
    # used the mangled placeholder instead of `token`; names restored.
    word_set = set()
    for token in tokens:
        # NOTE(review): `is_chinese` is the original name of the word check
        # above (renamed to SCREAMING_SNAKE_CASE_) — confirm it resolves.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def SCREAMING_SNAKE_CASE_(bert_tokens, chinese_word_set):
    """Mark BERT sub-tokens that continue a known Chinese word with the '##' prefix, in place.

    Greedily matches the longest word from `chinese_word_set` starting at each
    Chinese position of `bert_tokens` and prefixes the continuation tokens.
    """
    # BUG FIX: both parameters were mangled to the duplicate name `_snake_case`
    # (a SyntaxError) although the body already reads `bert_tokens` and
    # `chinese_word_set`; the max-length comprehension also measured the mangled
    # placeholder instead of each word `w`.  Names restored.
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        # NOTE(review): `is_chinese` is the original name of the word check
        # above (renamed to SCREAMING_SNAKE_CASE_) — confirm it resolves.
        if is_chinese(bert_word[start]):
            # Try the longest possible match first, down to length 2.
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Prefix every continuation piece of the matched word.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def SCREAMING_SNAKE_CASE_(lines, ltp_tokenizer, bert_tokenizer):
    """Compute whole-word-mask reference positions for each line of `lines`.

    Segments each line with LTP to find Chinese words, tokenizes with BERT,
    and returns, per line, the indices of sub-tokens that continue a word.
    """
    # BUG FIX: the three parameters were mangled to the duplicate name
    # `_snake_case` (a SyntaxError) although the body already reads `lines`,
    # `ltp_tokenizer` and `bert_tokenizer`; several call arguments and
    # assertion operands had been mangled to the same placeholder and are
    # restored from the surrounding locals.
    ltp_res = []
    for i in range(0, len(lines), 100):
        # Segment in batches of 100 lines; keep only the Chinese words.
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        # NOTE(review): `add_sub_symbol`, `get_chinese_word` and
        # `_is_chinese_char` are the original names of the helpers above
        # (all renamed to SCREAMING_SNAKE_CASE_) — confirm they resolve.
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Optional[int]:
    """Entry point: read ``file_name``, compute whole-word refs, save as JSON lines.

    Args:
        _snake_case: parsed CLI namespace with ``file_name``, ``ltp``, ``bert``
            and ``save_path`` attributes.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    # BUG FIX: the body read undefined globals (`args`, `data`, `lines`,
    # `ref_ids`) because all assignments were mangled to `_A`. Use the actual
    # parameter and coherent locals instead.
    with open(_snake_case.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    # Strip trailing newlines and drop blank/whitespace-only lines
    # (avoid delimiter like '\u2029').
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(_snake_case.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(_snake_case.bert )
    # NOTE(review): `prepare_ref` is referenced here exactly as in the original
    # body; its definition above was renamed by the same machine-mangling.
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(_snake_case.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    # BUG FIX: the parser was assigned to `UpperCAmelCase_` while the
    # `add_argument` calls referenced an undefined `parser`, and the final call
    # targeted an undefined `main`. Bind the parser under the name the calls
    # use, and invoke the entry point via the name it is actually defined as.
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    UpperCAmelCase_ = parser.parse_args()
    # NOTE(review): every function in this chunk is (re)defined under the name
    # SCREAMING_SNAKE_CASE_, so this call resolves to the last definition,
    # which is the main entry point directly above.
    SCREAMING_SNAKE_CASE_(UpperCAmelCase_)
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# BUG FIX: both module-level constants were mangled to `UpperCAmelCase_`,
# while the functions below read `logger` and `MAPPING`; restore the names
# the code actually uses.
logger = logging.get_logger(__name__)

# fairseq -> HF SEW name mapping. Keys are fragments of fairseq state-dict
# names; values are the matching HF module paths. "*" is substituted with the
# encoder layer index at load time.
MAPPING = {
    """post_extract_proj""": """feature_projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.upsample.0""": """encoder.upsample.projection""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
def SCREAMING_SNAKE_CASE_ ( hf_pointer , key , value , full_name , weight_type ):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    Args:
        hf_pointer: HF model (or sub-module) to write into.
        key: dotted attribute path inside the HF model.
        value: tensor from the fairseq checkpoint.
        full_name: original fairseq parameter name (for messages only).
        weight_type: one of "weight" / "weight_g" / "weight_v" / "bias" / None.
    """
    # BUG FIX: the original signature declared `_snake_case` five times (a
    # SyntaxError) and the actual tensor assignments were lost (`_A = value`).
    # Also dropped the `Union[...]` annotations: `typing` is not imported in
    # this chunk, so evaluating them raised NameError at import time.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ ( fairseq_model , hf_model , is_finetuned ):
    """Port every tensor of `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is matched against MAPPING and written with the recursive setter.
    Weights matching nothing are collected and logged as unused.
    """
    # BUG FIX: duplicate `_snake_case` parameters (SyntaxError) and locals
    # clobbered into `_A`; names restored from the original conversion script.
    # NOTE(review): `load_conv_layer` / `set_recursively` are referenced
    # exactly as in the original body; their definitions in this file were
    # renamed by the same mangling.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Fine-tuned checkpoints nest everything but the LM head
                # under the "sew." prefix.
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one conv feature-extractor tensor into the HF feature extractor.

    fairseq names look like ``conv_layers.<layer>.<type>.<param>``: type 0 is
    the conv itself, type 2 the layer norm (only present on layer 0 when
    group norm is used). Anything else is recorded in `unused_weights`.
    """
    # BUG FIX: duplicate `_snake_case` parameters (SyntaxError) and the tensor
    # assignments were lost (`_A = value`); restored from the original script.
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def SCREAMING_SNAKE_CASE_ ( model , is_finetuned ):
    """Build an HF SEWConfig from a fairseq model's config.

    Args:
        model: the loaded fairseq model (Wav2VecCtc wrapper when fine-tuned).
        is_finetuned: whether `model` is a fine-tuned CTC checkpoint.

    Returns:
        A populated ``SEWConfig``.
    """
    # BUG FIX: duplicate `_snake_case` parameters (SyntaxError) and every
    # `config.<attr> = ...` write collapsed into `_A = ...`; attribute names
    # restored from the original SEW conversion script.
    import ast  # local stdlib import: safe literal parsing below

    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    # SECURITY: was `eval(...)`; the value is a literal list of tuples, so
    # `ast.literal_eval` parses it without executing arbitrary code.
    conv_layers = ast.literal_eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        # NOTE(review): the mangling destroyed indentation cues; the original
        # script keeps the remaining overrides inside this branch — confirm.
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''
    return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq SEW checkpoint into the HF format and save it.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model/processor.
        config_path: optional existing HF config to reuse.
        dict_path: fairseq dictionary path (fine-tuned checkpoints only).
        is_finetuned: True when converting a Wav2VecCtc fine-tuned model.
    """
    # BUG FIX: duplicate `_snake_case` parameters (SyntaxError) and mangled
    # locals; names restored from the original conversion script.
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        # NOTE(review): `convert_config` is referenced exactly as in the
        # original body; its definition above was renamed by the mangling.
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # BUG FIX: the parser was bound to `UpperCAmelCase_` while the
    # `add_argument` calls referenced an undefined `parser`, and the final
    # call targeted an undefined `convert_sew_checkpoint`.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    UpperCAmelCase_ = parser.parse_args()
    # NOTE(review): the converter above is defined under the mangled name
    # SCREAMING_SNAKE_CASE_ (last definition in this chunk), so call it by
    # that name.
    SCREAMING_SNAKE_CASE_(
        UpperCAmelCase_.checkpoint_path, UpperCAmelCase_.pytorch_dump_folder_path, UpperCAmelCase_.config_path, UpperCAmelCase_.dict_path, UpperCAmelCase_.is_finetuned
    )
| 2 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub so the module still imports when PIL is unavailable
    # (vision-requiring tests are skipped by the decorators below).
    class lowerCamelCase__ :
        """Minimal stand-in used when the vision extras are not installed."""

        @staticmethod
        def snake_case_ ( *args , **kwargs ):
            # BUG FIX: the original declared `*__lowerCAmelCase, **__lowerCAmelCase`
            # — a duplicate parameter name, which is a SyntaxError. Annotations
            # dropped too: `typing` is not imported in this chunk.
            pass
@is_pipeline_test
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Pipeline tests for zero-shot image classification (tiny + full CLIP, PT/TF).

    BUG FIX: machine-mangling collapsed every local into `_A` (so the pipeline
    and image objects were never readable), referenced the undefined name
    `__lowerCAmelCase` inside method bodies, and gave all four test methods
    the same name so only the last survived. Locals and distinct `test_*`
    names restored; all fixture strings and expected values are unchanged.
    """

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        # Five batched inputs, each yielding three near-equal candidate scores;
        # labels are order-unstable, so match any string.
        expected = [
            [
                {'''score''': 0.333, '''label''': ANY(str )},
                {'''score''': 0.333, '''label''': ANY(str )},
                {'''score''': 0.333, '''label''': ANY(str )},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output ) , expected )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        expected = [
            [
                {'''score''': 0.333, '''label''': ANY(str )},
                {'''score''': 0.333, '''label''': ANY(str )},
                {'''score''': 0.333, '''label''': ANY(str )},
            ]
        ] * 5
        self.assertEqual(nested_simplify(output ) , expected )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
| 2 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
# NOTE(review): the module logger is bound to the mangled name
# `UpperCAmelCase_`; nothing in this chunk reads it under either name.
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline):
    """Zero-shot audio classification pipeline (CLAP-style audio/text matching).

    BUG FIX: the mangled original decorated and subclassed the undefined name
    `_A` (while `PIPELINE_INIT_ARGS` and `Pipeline` were imported unused),
    declared duplicate parameter names (a SyntaxError), collapsed locals into
    `_A`, and renamed the four pipeline hooks to one clobbering name even
    though the `Pipeline` base dispatches on `_sanitize_parameters` /
    `preprocess` / `_forward` / `postprocess`. All of that is restored here;
    runtime strings are unchanged.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__( self , audios , **kwargs ):
        """Classify the given audio(s) against caller-supplied candidate labels."""
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        # Route the recognised kwargs to the preprocess step.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}

    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , '''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
        inputs['''candidate_labels'''] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        # Highest-probability label first (was `lambda __lowerCAmelCase: -x[0]`,
        # a NameError).
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 2 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Unit tests for AlignProcessor (BERT tokenizer + EfficientNet image processor).

    BUG FIX: the mangling collapsed every local into `_A`, referenced the
    undefined `__lowerCAmelCase` inside bodies, and renamed all methods to one
    clobbering name — even though the test bodies call `self.get_tokenizer()`,
    `self.get_rust_tokenizer()`, `self.get_image_processor()` and
    `self.prepare_image_inputs()`, and unittest needs `setUp`/`tearDown` and
    distinct `test_*` names. Those names and the locals are restored; fixture
    values are unchanged.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        # The mangled body passed the undefined `__lowerCAmelCase` here; the
        # imported-but-unused IMAGE_PROCESSOR_NAME is the intended file name.
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer(self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self , **kwargs ):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self):
        """One random 30x400 RGB PIL image (channel-first array moved to HWC)."""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=False )

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , EfficientNetImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , EfficientNetImageProcessor )

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , EfficientNetImageProcessor )

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    """Nightly GPU integration test for OnnxStableDiffusionInpaintPipelineLegacy.

    BUG FIX: all three methods shared one mangled name (so the two properties
    were clobbered) while the test body reads `self.gpu_provider` and
    `self.gpu_options`; locals were collapsed into `_A` and call sites
    referenced the undefined `__lowerCAmelCase`. Names and locals restored;
    fixture URLs and tolerances are unchanged.
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the mangled body set some option to False right after
        # construction; `enable_mem_pattern = False` matches the sibling ONNX
        # diffusers tests — confirm against the original file.
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
| 2 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level constants below were machine-mangled to the
# same name `UpperCAmelCase_`, so the pretrained-config archive map clobbers
# the module logger — presumably these were `logger` and a `*_ARCHIVE_MAP`.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( PretrainedConfig):
    """Configuration class for the original OpenAI GPT model.

    BUG FIX: the mangled original subclassed the undefined name `_A` (while
    `PretrainedConfig` was imported unused), declared BOTH class attributes
    under the single name `a__` (the second clobbered the first; the
    `PretrainedConfig` machinery reads `model_type` and `attribute_map`),
    used duplicate `__lowerCAmelCase` parameters (a SyntaxError), and lost
    every `self.<attr> = <arg>` write to `_A`. All restored; default values
    are unchanged.
    """

    model_type = "openai-gpt"
    # Canonical config-attribute aliases expected by the shared HF machinery.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=4_04_78 , n_positions=5_12 , n_embd=7_68 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 2 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
# Silence torch's "lr_scheduler.step() before optimizer.step()" UserWarning:
# the wrapper below intentionally controls the stepping order itself.
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class lowerCamelCase__ :
    """Wrapper that steps an LR scheduler only when its optimizers really stepped.

    BUG FIX: the mangled original declared duplicate parameter names (a
    SyntaxError), lost every `self.<attr>` write to `_A`, and renamed all six
    delegating methods to one clobbering name even though each delegates to a
    distinct `self.scheduler` API (`step`, `get_last_lr`, `state_dict`,
    `load_state_dict`, `get_lr`, `print_lr`). Attribute writes and the
    delegated method names are restored.
    """

    def __init__( self , scheduler , optimizers , step_with_optimizer: bool = True , split_batches: bool = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
| 2 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """
    Holds the configuration used to build a DeformableDetrImageProcessor in
    the tests and computes the output (height, width) the processor is
    expected to produce.

    NOTE(review): parameter and method names restored from their call sites in
    the test class below (`prepare_image_processor_dict`,
    `get_expected_values`); the previous version collapsed every parameter
    into one duplicate name (a SyntaxError) and discarded all attribute
    assignments into a throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the (height, width) the processor should output: the shorter
        side is resized to size["shortest_edge"], keeping the aspect ratio;
        for a batch, the per-image maxima are returned (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # numpy / torch layout: (channels, height, width)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    """
    Test suite for DeformableDetrImageProcessor: configuration attributes,
    from_dict round-trips, PIL/numpy/torch inputs, and COCO detection /
    panoptic annotation encoding against reference tensors.

    NOTE(review): this block is machine-mangled and cannot run as-is:
    - the base class `_A` is undefined at module level (presumably an
      image-processing test mixin; ImageProcessingSavingTestMixin is imported
      above — confirm),
    - every method shares the collapsed name `snake_case_`, so earlier
      definitions are shadowed and none are discovered as `test_*` methods,
    - `DeformableDetrImageProcessingTester` is undefined (presumably the
      helper class above, whose name was collapsed) and the setUp result is
      bound to a throwaway `_A` instead of `self.image_processor_tester`,
    - many statements reference `__lowerCAmelCase`, a collapsed local name.
    Only comments are added here; the code is left byte-identical.
    """
    a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
    # setUp: builds the settings helper (result is discarded — see NOTE above).
    def snake_case_ ( self : Optional[int] ) -> Any:
        _A = DeformableDetrImageProcessingTester(self )
    # Keyword arguments used to build the image processor under test.
    @property
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    # Checks the processor exposes the expected configuration attributes.
    def snake_case_ ( self : Optional[int] ) -> List[str]:
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
    # from_dict round-trip, including size / max_size kwargs overrides.
    def snake_case_ ( self : List[str] ) -> int:
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
        _A = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
    def snake_case_ ( self : Any ) -> Union[str, Any]:
        pass
    # PIL inputs: checks output pixel_values shape for single and batched calls.
    def snake_case_ ( self : List[str] ) -> Optional[int]:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # numpy inputs: same shape checks as above.
    def snake_case_ ( self : Tuple ) -> int:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # torch tensor inputs: same shape checks as above.
    def snake_case_ ( self : Optional[Any] ) -> int:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # COCO detection annotations: encodes a fixture image+targets and compares
    # pixel values, areas, boxes, ids, labels and sizes to reference tensors.
    @slow
    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        # prepare image and target
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        _A = DeformableDetrImageProcessor()
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
    # COCO panoptic annotations: as above, plus segmentation masks.
    @slow
    def snake_case_ ( self : List[str] ) -> List[str]:
        # prepare image, target and masks_path
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        _A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify masks
        _A = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2 | 1 |
import os
def largest_product(grid):
    """
    Return the greatest product of four adjacent numbers — vertically,
    horizontally or on either diagonal — in the given n x n grid
    (Project Euler problem 11).

    Fixed: the previous version referenced an undefined name for the grid and
    collapsed every local into a single variable, so it could not run.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


# Backward-compatible alias for the auto-generated name; also provides the
# `largest_product` name used by the solution function below.
SCREAMING_SNAKE_CASE_ = largest_product
def solution():
    """
    Read the grid stored in ``grid.txt`` next to this file and return the
    largest product of four adjacent numbers in it (see `largest_product`).

    Fixed: the previous version passed an undefined name to os.path.dirname
    (presumably __file__) and never bound the list it appended to.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    # Convert the string cells to integers.
    grid = [[int(cell) for cell in row] for row in grid]
    return largest_product(grid)


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = solution


if __name__ == "__main__":
    print(solution())
| 2 |
UpperCAmelCase_ = 0 # The first color of the flag.
UpperCAmelCase_ = 1 # The second color of the flag.
UpperCAmelCase_ = 2 # The third color of the flag.
UpperCAmelCase_ = (red, white, blue)
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list:
if not sequence:
return []
if len(_snake_case ) == 1:
return list(_snake_case )
_A = 0
_A = len(_snake_case ) - 1
_A = 0
while mid <= high:
if sequence[mid] == colors[0]:
_A , _A = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_A , _A = sequence[high], sequence[mid]
high -= 1
else:
_A = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(_snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input("""Enter numbers separated by commas:\n""").strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 2 | 1 |
# Restored names: `ks`, `base` and `memo` are read by the helper functions
# below, but were previously all bound to one collapsed name, so `ks[-1]`
# raised NameError at import time.
ks = range(2, 2_0 + 1)  # the k values used for the b * 10^k + c decomposition
base = [1_0**k for k in range(ks[-1] + 1)]  # powers of ten up to 10**20
memo: dict = {}  # memo[digitsum(b)][c] -> list of cached jumps (diff, dn, k)
UpperCAmelCase_ = memo  # backward-compatible alias for the collapsed name
def next_term(a_i, k, i, n):
    """
    Advance the digit-sum sequence, writing the current term as
    a(i) = b * 10^k + c, by replaying the largest cached "jump" for
    (digitsum(b), c) and then recursing with smaller k (or computing
    sequential terms at the smallest k). Updates `a_i` in place.

    Returns:
        (diff, dn): the total amount added to the term and the number of
        terms advanced.

    NOTE(review): local names restored from their read sites; the previous
    version collapsed them all into `_A`, so `diff`, `dn`, `sub_memo`,
    `jumps`, `max_jump` and `new_c` were never bound.
    """
    # ds_b: digit sum of b (digits at positions >= k); c: value of low k digits.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = next_term
def compute(a_i, k, i, n):
    """
    Sequentially advance the sequence a(i+1) = a(i) + digitsum(a(i)),
    updating the little-endian digit list `a_i` in place, until either the
    n-th term is reached or a carry spills past the first k digits.

    Returns:
        (diff, terms_computed): total amount added to the term and the
        number of terms advanced.

    NOTE(review): local names restored from their read sites; the previous
    version collapsed `ds_b`, `ds_c`, `diff`, `addend` and `s` into `_A`.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend

        # Add the digit sum into the low k digits, tracking the carry.
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = compute
def add(digits, k, addend):
    """
    Add `addend` into the little-endian digit list `digits`, starting at
    index k, propagating carries and appending new high digits as needed.

    NOTE(review): local names restored; the previous version collapsed
    `s`, `quotient` and the carry into `_A`, so the carry was lost.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    # Any remaining carry becomes new high-order digits.
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = add
def solution(n: int = 10**15) -> int:
    """
    Return a(n) for the sequence a(1) = 1, a(i+1) = a(i) + digitsum(a(i)),
    using the memoized jumps in `next_term` to skip ahead.

    NOTE(review): local names restored; the previous version collapsed
    `digits`, `i`, `dn`, `diff` and `a_n` into `_A`.
    """
    digits = [1]  # little-endian digits of the current term
    i = 1
    dn = 0  # how many terms have been advanced so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # Reassemble the little-endian digit list into an integer.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = solution


if __name__ == "__main__":
    print(f'{solution() = }')
| 2 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compatible alias; also binds the `is_prime` name that the prime
# generator below calls (it was previously undefined).
SCREAMING_SNAKE_CASE_ = is_prime
def prime_generator():
    """Yield successive prime numbers starting from 2 (see `is_prime`)."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


# Backward-compatible alias; also binds the `prime_generator` name that the
# solution below calls (it was previously undefined).
SCREAMING_SNAKE_CASE_ = prime_generator
def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    # Fixed: the body read `nth` but the parameter had a different collapsed name.
    return next(itertools.islice(prime_generator(), nth - 1, nth))


# Backward-compatible alias for the auto-generated name.
SCREAMING_SNAKE_CASE_ = solution


if __name__ == "__main__":
    print(f'{solution() = }')
| 2 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ ( _A):
    # NOTE(review): the base class `_A` is undefined at module level here —
    # presumably a config base class (e.g. PretrainedConfig); confirm.
    """Dummy config class registered under the custom "new-model" type in the tests below."""
    a__ : Dict = "new-model"  # presumably the config's `model_type`; verify
if is_tf_available():
    class lowerCamelCase__ ( _A):
        # NOTE(review): base `_A` is undefined here (presumably a TF model
        # class), and `NewModelConfig` is undefined — presumably the dummy
        # config class defined just above, whose name was collapsed; confirm.
        """Dummy TF model whose `a__` (presumably config_class) points at the custom config."""
        a__ : Optional[Any] = NewModelConfig
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """
    Test suite for the TF auto-model API: loading pretrained checkpoints via
    the TFAutoModel* classes, registering custom config/model classes, error
    messages for bad identifiers/revisions, and checkpoint caching behavior.

    NOTE(review): this block is machine-mangled and cannot run as-is:
    - every method shares the collapsed name `snake_case_`, so earlier
      definitions are shadowed and none are discovered as `test_*` methods,
    - many statements reference `__lowerCAmelCase`, a collapsed local name
      (in several methods it stands in for model identifiers such as
      SMALL_MODEL_IDENTIFIER or for expected classes/exception types),
    - results are bound to a throwaway `_A` and then read back under their
      original names (e.g. `model`, `counter`), which are unbound.
    Only comments are added here; the code is left byte-identical.
    """
    # Loads bert-base-cased via TFAutoModel and checks config/model types.
    @slow
    def snake_case_ ( self : Optional[int] ) -> List[str]:
        _A = '''bert-base-cased'''
        _A = AutoConfig.from_pretrained(__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        _A = TFAutoModel.from_pretrained(__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Same, for the pretraining head.
    @slow
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        _A = '''bert-base-cased'''
        _A = AutoConfig.from_pretrained(__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        _A = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Causal-LM auto-loading over the GPT-2 archive list.
    @slow
    def snake_case_ ( self : List[Any] ) -> Tuple:
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
            _A , _A = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # LM-head auto-loading over the BERT archive list.
    @slow
    def snake_case_ ( self : str ) -> Optional[Any]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Masked-LM auto-loading.
    @slow
    def snake_case_ ( self : List[str] ) -> Union[str, Any]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase )
            _A , _A = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Seq2seq-LM auto-loading over the T5 archive list.
    @slow
    def snake_case_ ( self : str ) -> Dict:
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase )
            _A , _A = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Sequence-classification auto-loading.
    @slow
    def snake_case_ ( self : int ) -> Dict:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Question-answering auto-loading.
    @slow
    def snake_case_ ( self : Any ) -> Any:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Table QA auto-loading (needs tensorflow_probability).
    @slow
    @require_tensorflow_probability
    def snake_case_ ( self : List[Any] ) -> str:
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            _A = AutoConfig.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
            _A = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCAmelCase )
            _A , _A = TFAutoModelForTableQuestionAnswering.from_pretrained(
                __lowerCAmelCase , output_loading_info=__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Parameter counting on a tiny model identifier.
    def snake_case_ ( self : str ) -> List[str]:
        _A = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 )
    def snake_case_ ( self : List[str] ) -> Tuple:
        _A = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 )
    # Architecture override via config save/reload (FunnelModel vs FunnelBaseModel).
    def snake_case_ ( self : Optional[int] ) -> Dict:
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        _A = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        _A = copy.deepcopy(model.config )
        _A = ['''FunnelBaseModel''']
        _A = TFAutoModel.from_config(__lowerCAmelCase )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(__lowerCAmelCase )
            _A = TFAutoModel.from_pretrained(__lowerCAmelCase )
            self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    # Registration of a custom config/model pair with every TF auto class,
    # with cleanup of the shared mappings afterwards.
    def snake_case_ ( self : List[str] ) -> Optional[int]:
        try:
            AutoConfig.register('''new-model''' , __lowerCAmelCase )
            _A = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(__lowerCAmelCase ):
                        auto_class.register(__lowerCAmelCase , __lowerCAmelCase )
                    auto_class.register(__lowerCAmelCase , __lowerCAmelCase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(__lowerCAmelCase ):
                        auto_class.register(__lowerCAmelCase , __lowerCAmelCase )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    _A = BertModelTester(self ).get_config()
                    _A = NewModelConfig(**tiny_config.to_dict() )
                    _A = auto_class.from_config(__lowerCAmelCase )
                    self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(__lowerCAmelCase )
                        _A = auto_class.from_pretrained(__lowerCAmelCase )
                        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    # Error-message checks for invalid identifiers, revisions, and formats.
    def snake_case_ ( self : Optional[int] ) -> Optional[int]:
        with self.assertRaisesRegex(
            __lowerCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
            _A = TFAutoModel.from_pretrained('''bert-base''' )
    def snake_case_ ( self : int ) -> str:
        with self.assertRaisesRegex(
            __lowerCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _A = TFAutoModel.from_pretrained(__lowerCAmelCase , revision='''aaaaaa''' )
    def snake_case_ ( self : Any ) -> List[str]:
        with self.assertRaisesRegex(
            __lowerCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            _A = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
    def snake_case_ ( self : List[str] ) -> Any:
        with self.assertRaisesRegex(__lowerCAmelCase , '''Use `from_pt=True` to load this model''' ):
            _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
    # Cached checkpoints should only trigger HEAD requests on reload.
    def snake_case_ ( self : List[Any] ) -> Dict:
        # Make sure we have cached the model.
        _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 2 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init, joined with "_and_"."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    # Fixed: the previous version never bound `backends` and joined the raw
    # input line instead of the sorted backend list.
    return "_and_".join(backends)


# Backward-compatible alias; the init parser below calls `find_backend`.
SCREAMING_SNAKE_CASE_ = find_backend
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
    """Parse a transformers ``__init__.py`` at path ``_snake_case``.

    Intended result: a pair of dicts mapping backend name (``"none"`` for
    backend-free objects) to the list of object names, first as declared in
    ``_import_structure`` and then as declared under ``if TYPE_CHECKING``;
    ``None`` for a traditional init without an ``_import_structure``.

    NOTE(review): most assignment targets here were mangled to ``_A`` —
    ``lines``, ``line_index``, ``objects``, ``line``, ``backend``,
    ``single_line_import_search``, ``import_dict_objects`` and
    ``type_hint_objects`` are read below but never bound, so this cannot run
    as written. The original names must be restored before use; the comments
    describe the evident intent only.
    """
    with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        _A = f.readlines()  # presumably bound to `lines` — TODO confirm
    _A = 0  # presumably bound to `line_index`
    # Scan forward to the `_import_structure = {` declaration.
    while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(_snake_case ):
        return None
    # First grab the objects without a specific backend in _import_structure
    _A = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        _A = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_snake_case ):
            _A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
            _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
            for imp in imports:
                # Strip the surrounding quotes from each listed object name.
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        _A = _re_import_struct_key_value.search(_snake_case )
        if single_line_import_search is not None:
            _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
            objects.extend(_snake_case )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            # A quoted object on its own 8-space-indented line; drop quotes/comma.
            objects.append(line[9:-3] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                _A = lines[line_index]
                if _re_import_struct_add_one.search(_snake_case ) is not None:
                    objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
                elif _re_import_struct_add_many.search(_snake_case ) is not None:
                    _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_between_brackets.search(_snake_case ) is not None:
                    _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_quote_object.search(_snake_case ) is not None:
                    objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    _A = []
    while (
        line_index < len(_snake_case )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        _A = lines[line_index]
        _A = _re_import.search(_snake_case )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(_snake_case ):
        # If the line is an if is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                _A = lines[line_index]
                _A = _re_import.search(_snake_case )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any:
def find_duplicates(_snake_case :Any ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def SCREAMING_SNAKE_CASE_ ( ) -> int:
    """Walk the source tree and verify every ``__init__.py``'s two halves agree.

    Intended behavior: parse each init, analyze the result, and raise
    ``ValueError`` listing every file whose ``_import_structure`` and
    ``TYPE_CHECKING`` halves differ.

    NOTE(review): this cannot run as written — ``_snake_case`` (the root path
    to walk), ``parse_init`` and ``analyze_results`` are not defined under
    these names in this file, and the ``_A`` assignments shadow the intended
    locals ``failures``, ``fname``, ``objects`` and ``errors`` which are read
    below. Restore the original names before use.
    """
    _A = []  # presumably bound to `failures` — TODO confirm
    for root, _, files in os.walk(_snake_case ):
        if "__init__.py" in files:
            _A = os.path.join(_snake_case , '''__init__.py''' )
            _A = parse_init(_snake_case )
            if objects is not None:
                _A = analyze_results(*_snake_case )
                if len(_snake_case ) > 0:
                    _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(_snake_case ) )
    if len(_snake_case ) > 0:
        raise ValueError('''\n\n'''.join(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    """Collect the dotted names of the transformers submodules on disk.

    Intended behavior: walk the source tree, record every package directory
    (skipping private ``_``-prefixed ones and empty leftover folders) and every
    top-level ``*.py`` module as a dotted module path.

    NOTE(review): cannot run as written — ``_snake_case`` (the root path and
    the values fed to ``remove``/``append``/``Path``) is unbound, and the
    ``_A`` assignments shadow ``submodules``, ``short_path`` and ``submodule``
    which are read below. Restore the original names before use.
    """
    _A = []  # presumably bound to `submodules` — TODO confirm
    for path, directories, files in os.walk(_snake_case ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(_snake_case )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
            _A = short_path.replace(os.path.sep , '''.''' )
            submodules.append(_snake_case )
        for fname in files:
            if fname == "__init__.py":
                continue
            _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
            _A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            # Only record modules that live directly under the package root.
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(_snake_case )
    return submodules
# Submodules deliberately not registered in the transformers main init.
# Fix: this list was bound only to the mangled name `UpperCAmelCase_`, while
# the submodule check below reads it as `IGNORE_SUBMODULES` (NameError).
# Bind both names; `UpperCAmelCase_` keeps its original value.
UpperCAmelCase_ = IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    """Verify every transformers submodule appears in the main init's
    ``_import_structure`` keys (directly or via ``import_structure["..."]``
    additions read from the init's source).

    NOTE(review): cannot run as written — ``_snake_case`` (the repo path and
    the string passed to ``re.findall``), ``get_transformers_submodules``,
    and the locals shadowed by ``_A`` (``transformers``,
    ``import_structure_keys``, ``module_not_registered``, ``list_of_modules``)
    are unresolved. Restore the original names before use.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    _A = direct_transformers_import(_snake_case )
    _A = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
        _A = f.read()
    import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) )
    _A = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_snake_case ) > 0:
        _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registed in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
    # Script entry point: run both repo-consistency checks.
    # NOTE(review): the functions above are all defined under the mangled name
    # SCREAMING_SNAKE_CASE_, so `check_all_inits` / `check_submodules` are
    # undefined here and this raises NameError — confirm the intended names.
    check_all_inits()
    check_submodules()
| 2 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> Dict:
_A = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict ) -> Tuple:
_A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
_A = s_dict.pop(_snake_case )
elif "subsample" in key:
_A = s_dict.pop(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> str:
_A , _A = emb.weight.shape
_A = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_A = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Union[str, Any] ) -> Optional[Any]:
    """Convert a fairseq Speech2Text checkpoint into a Hugging Face
    ``SpeechaTextForConditionalGeneration`` checkpoint and save it.

    NOTE(review): this cannot run as written. The signature declares two
    parameters with the same mangled name ``_snake_case`` — a SyntaxError in
    Python; judging by the ``__main__`` block below the originals were
    presumably ``checkpoint_path`` and ``pytorch_dump_folder_path``. Most
    locals were also mangled to ``_A`` (``mam_aaa``, ``args``, ``state_dict``,
    ``tie_embeds``, ``model``, ``missing``, ``lm_head_weights`` are read but
    never bound), and ``remove_ignore_keys_`` / ``rename_keys`` /
    ``make_linear_from_emb`` are not defined under these names in this file.
    Restore names before use; comments describe the evident intent.
    """
    _A = torch.load(_snake_case , map_location='''cpu''' )
    _A = mam_aaa['''args''']
    _A = mam_aaa['''model''']
    # The output projection is saved separately and dropped from the state dict.
    _A = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(_snake_case )
    rename_keys(_snake_case )
    _A = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    _A = args.share_decoder_input_output_embed
    _A = [int(_snake_case ) for i in args.conv_kernel_sizes.split(''',''' )]
    # Mirror the fairseq training args into the HF config.
    _A = SpeechaTextConfig(
        vocab_size=_snake_case , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(_snake_case ) , conv_channels=args.conv_channels , conv_kernel_sizes=_snake_case , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_snake_case , num_beams=5 , max_length=200 , use_cache=_snake_case , decoder_start_token_id=2 , early_stopping=_snake_case , )
    _A = SpeechaTextForConditionalGeneration(_snake_case )
    _A , _A = model.model.load_state_dict(_snake_case , strict=_snake_case )
    # Only the sinusoidal position weights may legitimately be missing.
    if len(_snake_case ) > 0 and not set(_snake_case ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            F''' but all the following weights are missing {missing}''' )
    # Either tie the LM head to the decoder embeddings, or restore the saved projection.
    if tie_embeds:
        _A = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        _A = lm_head_weights
    model.save_pretrained(_snake_case )
if __name__ == "__main__":
    # Command-line entry point: convert a fairseq Speech2Text checkpoint into a
    # Hugging Face checkpoint directory.
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    # NOTE(review): the parser is bound to the mangled name `UpperCAmelCase_`,
    # but referenced below as `parser` / `args`, and the conversion function is
    # defined above as SCREAMING_SNAKE_CASE_ — running this raises NameError;
    # confirm the intended names.
    parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    UpperCAmelCase_ = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 2 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_A)
class lowerCamelCase__ ( _A):
    """Image-to-text pipeline: takes an image (or list of images), optionally a
    text prompt for conditional generation, and returns generated captions.

    NOTE(review): the decorator argument and base class were mangled to ``_A``
    (presumably ``PIPELINE_INIT_ARGS`` and ``Pipeline`` from the imports
    above — confirm), and many locals below are assigned to ``_A`` while the
    intended names (``prompt``, ``forward_kwargs``, ``model_inputs``,
    ``model_outputs``, ``records`` …) are read unbound. Restore names before
    running; comments describe the evident intent.
    """

    def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]:
        # Requires the vision extra and restricts to vision-to-seq model classes.
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int:
        """Sanitize call-time parameters into (preprocess, forward, postprocess)
        kwarg dicts; rejects `max_new_tokens` given both directly and inside
        `generate_kwargs`."""
        _A = {}  # presumably `preprocess_params` — TODO confirm
        _A = {}  # presumably `forward_kwargs`
        if prompt is not None:
            _A = prompt
        if generate_kwargs is not None:
            _A = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                _A = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            _A = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
        # Accepts a single image (path/URL/PIL) or a batch of them.
        return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int:
        """Load the image and build model inputs, handling the prompt per model
        family (git / pix2struct / plain vision-encoder-decoder)."""
        _A = load_image(__lowerCAmelCase )
        if prompt is not None:
            if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            _A = self.model.config.model_type
            if model_type == "git":
                # GIT prepends the CLS token to the tokenized prompt.
                _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
                _A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
                _A = [self.tokenizer.cls_token_id] + input_ids
                _A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                # Pix2Struct renders the prompt as header text on the image.
                _A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
                _A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
                model_inputs.update(__lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            _A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            _A = None
        return model_inputs

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str:
        """Run generation on the prepared inputs."""
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            _A = None
        if generate_kwargs is None:
            _A = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        _A = model_inputs.pop(self.model.main_input_name )
        _A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
        return model_outputs

    def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]:
        """Decode generated token ids into a list of {"generated_text": ...} dicts."""
        _A = []  # presumably `records` — TODO confirm
        for output_ids in model_outputs:
            _A = {
                '''generated_text''': self.tokenizer.decode(
                    __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
            }
            records.append(__lowerCAmelCase )
        return records
| 2 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__ ( _A):
    """Image processor (MobileViT-style): optional shortest-edge resize,
    rescale, center crop and RGB->BGR channel flip, plus semantic-segmentation
    post-processing of model logits.

    NOTE(review): the base class was mangled to ``_A`` (presumably
    ``BaseImageProcessor`` from the imports above — confirm), and the
    ``__init__``/``preprocess`` bodies assign every value to ``_A`` instead of
    the ``self.*``/local names read elsewhere. Restore names before running;
    comments describe the evident intent.
    """

    # Output keys produced by `preprocess`.
    a__ : Union[str, Any] = ["pixel_values"]

    def __init__( self : List[str] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 2_55 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : List[Any] , ) -> None:
        """Store the default resize/rescale/crop/channel-flip configuration."""
        super().__init__(**__lowerCAmelCase )
        # Defaults: shortest edge 224 for resize, 256x256 for the center crop.
        _A = size if size is not None else {'''shortest_edge''': 2_24}
        _A = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        _A = crop_size if crop_size is not None else {'''height''': 2_56, '''width''': 2_56}
        _A = get_size_dict(__lowerCAmelCase , param_name='''crop_size''' )
        _A = do_resize
        _A = size
        _A = resample
        _A = do_rescale
        _A = rescale_factor
        _A = do_center_crop
        _A = crop_size
        _A = do_flip_channel_order

    def snake_case_ ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PIL.Image.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[int] , ) -> np.ndarray:
        """Resize so the image's shortest edge matches size["shortest_edge"]."""
        _A = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        _A = get_resize_output_image_size(__lowerCAmelCase , size=size['''shortest_edge'''] , default_to_square=__lowerCAmelCase )
        return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict , ) -> np.ndarray:
        """Crop to exactly size["height"] x size["width"] around the center."""
        _A = get_size_dict(__lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(__lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def snake_case_ ( self : Dict , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Any , ) -> Optional[int]:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        """Swap RGB <-> BGR channel order."""
        return flip_channel_order(__lowerCAmelCase , data_format=__lowerCAmelCase )

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Optional[int] , ) -> PIL.Image.Image:
        """Apply the configured transformations to one image or a batch and
        return a BatchFeature with "pixel_values"."""
        # Per-call overrides fall back to the processor's configured defaults.
        _A = do_resize if do_resize is not None else self.do_resize
        _A = resample if resample is not None else self.resample
        _A = do_rescale if do_rescale is not None else self.do_rescale
        _A = rescale_factor if rescale_factor is not None else self.rescale_factor
        _A = do_center_crop if do_center_crop is not None else self.do_center_crop
        _A = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        _A = size if size is not None else self.size
        _A = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        _A = crop_size if crop_size is not None else self.crop_size
        _A = get_size_dict(__lowerCAmelCase , param_name='''crop_size''' )
        _A = make_list_of_images(__lowerCAmelCase )
        if not valid_images(__lowerCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        # All transformations expect numpy arrays.
        _A = [to_numpy_array(__lowerCAmelCase ) for image in images]
        if do_resize:
            _A = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
        if do_center_crop:
            _A = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
        if do_rescale:
            _A = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            _A = [self.flip_channel_order(image=__lowerCAmelCase ) for image in images]
        _A = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
        _A = {'''pixel_values''': images}
        return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )

    def snake_case_ ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Tuple] = None ) -> str:
        """Turn model logits into per-image segmentation maps, optionally
        bilinearly resized to the given (height, width) target sizes."""
        _A = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(__lowerCAmelCase ):
                _A = target_sizes.numpy()
            _A = []
            for idx in range(len(__lowerCAmelCase ) ):
                # Upsample each image's logits to its target size before argmax.
                _A = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__lowerCAmelCase )
                _A = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(__lowerCAmelCase )
        else:
            # No resizing requested: argmax over the class dimension directly.
            _A = logits.argmax(dim=1 )
            _A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 2 |
import requests
from bs4 import BeautifulSoup  # fix: the package is `bs4`, not `bsa`


def SCREAMING_SNAKE_CASE_ ( _snake_case :str = "AAPL" ) -> str:
    """Scrape the current stock price for ticker ``_snake_case`` from Yahoo
    Finance (India).

    Fixes: the body read unbound names (``symbol``, ``class_``) because the
    mangled ``_A`` assignments shadowed them, and the import referenced the
    non-existent module ``bsa``. NOTE(review): screen-scraping is brittle —
    the CSS class below is tied to Yahoo's current markup.
    """
    url = F'''https://in.finance.yahoo.com/quote/{_snake_case}?s={_snake_case}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text


# Backward-compatible alias: the __main__ block below calls `stock_price`.
stock_price = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 2 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def SCREAMING_SNAKE_CASE_ ( *args , take_from=None , standard_warn=True , stacklevel=2 ) -> str:
    """Emit deprecation warnings for one or more ``(attribute, version, message)``
    tuples and pull deprecated values out of ``take_from`` (a kwargs dict or an
    object). Returns nothing, a single value, or a tuple of values depending on
    how many deprecated values were collected.

    Fixes: the original declared four keyword-only parameters all named
    ``_snake_case`` (a SyntaxError in Python), and bound every local to the
    throwaway ``_A`` while reading ``deprecated_kwargs``, ``values``,
    ``warning``, ``call_frame`` etc. (NameError). Names restored from the
    body's own reads. Raises ``ValueError`` if a deprecation is past due for
    the current package version, ``TypeError`` on leftover unexpected kwargs.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare (attribute, version, message) triple.
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        # A deprecation whose removal version has passed must be deleted, not warned.
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            # Deprecated keyword argument: pop it so it is not "unexpected" below.
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            # Deprecated object attribute.
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            # With standard_warn the generated sentence prefixes the custom message;
            # without it only the custom message is emitted.
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        # Anything left in the kwargs dict was not a known deprecated name.
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 2 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
    """Regression test: Kruskal's MST on a classic 9-node weighted graph.

    Fix: the original bound both the node count and the edge list to the
    throwaway name ``_A`` and then called ``kruskal(_snake_case, _snake_case)``
    with an unbound name (NameError in a no-argument function).
    """
    num_nodes = 9
    # Edges as [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Compare order-insensitively: MST edge order is implementation-defined.
    assert sorted(result ) == sorted(expected )
| 2 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class lowerCamelCase__ :
        """Stand-in used when the vision extra (PIL) is not installed, so that
        test collection does not fail on the missing import; its only method
        is a no-op placeholder for ``Image.open``."""

        @staticmethod
        def snake_case_ ( *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Tuple ) -> List[Any]:
            pass
@is_pipeline_test
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Tests for the visual-question-answering pipeline: a tiny random ViLT
    smoke test plus a slow exact-score test against
    ``dandelin/vilt-b32-finetuned-vqa``.

    NOTE(review): locals below were mangled to ``_A`` while ``vqa_pipeline``,
    ``examples``, ``image`` and ``question`` are read unbound — restore the
    original names before running.
    """

    # Model mapping this pipeline test class is parametrized over.
    a__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def snake_case_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ) -> int:
        # Build a tiny random VQA pipeline and two example inputs (PIL image and path).
        _A = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        _A = [
            {
                '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''question''': '''How many cats are there?''',
            },
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''question''': '''How many cats are there?''',
            },
        ]
        return vqa_pipeline, examples

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> Tuple:
        # Smoke-check the output structure: one [{score, answer}] list per example.
        _A = vqa_pipeline(__lowerCAmelCase , top_k=1 )
        self.assertEqual(
            __lowerCAmelCase , [
                [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}],
                [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}],
            ] , )

    @require_torch
    def snake_case_ ( self : Dict ) -> Any:
        # Tiny model: check both the kwargs and the dict calling conventions.
        _A = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        _A = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        _A = '''How many cats are there?'''
        _A = vqa_pipeline(image=__lowerCAmelCase , question='''How many cats are there?''' , top_k=2 )
        self.assertEqual(
            __lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )
        _A = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            __lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )

    @slow
    @require_torch
    def snake_case_ ( self : Dict ) -> List[str]:
        # Real finetuned ViLT: check exact (rounded) scores, single and batched.
        _A = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
        _A = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        _A = '''How many cats are there?'''
        _A = vqa_pipeline(image=__lowerCAmelCase , question=__lowerCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
        _A = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
        _A = vqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase , decimals=4 ) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )

    @require_tf
    @unittest.skip('''Visual question answering not implemented in TF''' )
    def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
        pass
| 2 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the sum of the proper divisors of ``_snake_case`` (divisors
    strictly smaller than the number itself; 0 for 1).

    Raises ``ValueError`` for non-int input or non-positive values.

    Fixes: the original called ``isinstance(x, x)`` (TypeError at runtime —
    the second argument must be a type) and read an unbound name
    ``input_num`` instead of the parameter.
    """
    if not isinstance(_snake_case , int ):
        raise ValueError('''Input must be an integer''' )
    if _snake_case <= 0:
        raise ValueError('''Input must be positive''' )
    # Proper divisors never exceed n // 2.
    return sum(
        divisor for divisor in range(1 , _snake_case // 2 + 1 ) if _snake_case % divisor == 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 2 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False, False, False
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
a__ : str = field(default="Audio" , init=_A , repr=_A)
    def __call__( self : str ) -> List[str]:
        """Return the feature's pyarrow storage type when the feature object is
        called.

        NOTE(review): the class attributes above were all mangled to ``a__``,
        so ``self.pa_type`` is unresolved as written — presumably the
        ``pa.struct({"bytes": ..., "path": ...})`` ClassVar; confirm.
        """
        return self.pa_type
    def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, bytes, dict] ) -> dict:
        """Encode one audio example into the storage dict ``{"bytes", "path"}``.

        Accepts a path string, raw bytes, or a dict carrying ``array`` +
        ``sampling_rate``, ``path`` and/or ``bytes``.

        NOTE(review): the body reads ``value`` and ``buffer`` while the mangled
        parameter is ``__lowerCAmelCase`` and the buffers are assigned to
        ``_A`` — unresolved names as written; restore before use. Likewise
        ``np.intaa``/``np.floataa`` are presumably the mangled
        ``np.int16``/``np.float32`` dtypes, and ``3_27_67`` is the int16 scale
        32767 — confirm.
        """
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            # A bare path string: store it, defer reading the bytes.
            return {"bytes": None, "path": value}
        elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            # Raw audio bytes with no known path.
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            _A = BytesIO()
            sf.write(__lowerCAmelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    _A = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
                else:
                    _A = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
                _A = BytesIO(bytes() )
                sf.write(__lowerCAmelCase , __lowerCAmelCase , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : dict , __lowerCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
_A , _A = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
_A = xsplitext(__lowerCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
_A = token_per_repo_id or {}
_A = path.split('''::''' )[-1]
try:
_A = string_to_dict(__lowerCAmelCase , config.HUB_DATASETS_URL )['''repo_id''']
_A = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_A = None
with xopen(__lowerCAmelCase , '''rb''' , use_auth_token=__lowerCAmelCase ) as f:
_A , _A = sf.read(__lowerCAmelCase )
else:
_A , _A = sf.read(__lowerCAmelCase )
_A = array.T
if self.mono:
_A = librosa.to_mono(__lowerCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_A = librosa.resample(__lowerCAmelCase , orig_sr=__lowerCAmelCase , target_sr=self.sampling_rate )
_A = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def snake_case_ ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_A = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
_A = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_A = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
_A = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
_A = pa.array([Audio().encode_example(__lowerCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
_A = storage.field('''bytes''' )
else:
_A = pa.array([None] * len(__lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
_A = storage.field('''path''' )
else:
_A = pa.array([None] * len(__lowerCAmelCase ) , type=pa.string() )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__lowerCAmelCase : Union[str, Any] ):
with xopen(__lowerCAmelCase , '''rb''' ) as f:
_A = f.read()
return bytes_
_A = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_A = pa.array(
[os.path.basename(__lowerCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__lowerCAmelCase , self.pa_type )
| 2 |
# Alphabet size for the polynomial rolling hash (byte range).
# NOTE(review): both constants are bound to the same obfuscated name
# ``UpperCAmelCase_`` (the second shadows the first), while the matcher below
# expects the names ``alphabet_size`` and ``modulus`` — confirm.
UpperCAmelCase_ = 2_5_6
# Modulus to hash a string
UpperCAmelCase_ = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE_ ( pattern :str , text :str , alphabet_size :int = 256 , modulus :int = 1_000_003 ) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Implements Rabin-Karp string search with a polynomial rolling hash: a hash
    collision triggers an explicit substring comparison, so the result is exact.

    Args:
        pattern: The substring to search for (an empty pattern always matches).
        text: The string to search in.
        alphabet_size: Base of the polynomial hash (default 256, the byte range).
        modulus: Prime modulus keeping hash values bounded.

    Returns:
        True if ``pattern`` is found in ``text``, False otherwise.
    """
    # Bug fix: the original declared both parameters as ``_snake_case`` (a
    # duplicate-argument SyntaxError) and read the undefined names ``p_hash``,
    # ``text_hash``, ``modulus_power``, ``alphabet_size`` and ``modulus`` —
    # its named assignments had been collapsed to ``_A``. The hash constants
    # are now defaulted parameters so the function is self-contained.
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Hash the pattern and the first window of the text simultaneously;
    # modulus_power ends up as alphabet_size**(p_len - 1) % modulus, the
    # weight of the leading character when rolling the window.
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        # Hash match is only a candidate; verify to rule out collisions.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the window: drop text[i], append text[i + p_len].
        # See https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Self-test driver for the Rabin-Karp matcher above.

    NOTE(review): the body calls ``rabin_karp`` and the ``__main__`` guard
    below calls ``test_rabin_karp``, but neither name exists in this module —
    the obfuscation renamed both functions to ``SCREAMING_SNAKE_CASE_`` (and
    replaced the local test-fixture bindings with ``_A``, leaving
    ``_snake_case`` unresolved too) — so executing this file raises NameError.
    Confirm the intended names before use.
    """
    # Test 1) pattern present in one text, absent in the other
    _A = '''abc1abc12'''
    _A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    _A = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
    # Test 2)
    _A = '''ABABX'''
    _A = '''ABABZABABYABABX'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 3)
    _A = '''AAAB'''
    _A = '''ABAAAAAB'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 4)
    _A = '''abcdabcy'''
    _A = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 5) non-ASCII input
    _A = '''Lü'''
    _A = '''Lüsai'''
    assert rabin_karp(_snake_case , _snake_case )
    _A = '''Lue'''
    assert not rabin_karp(_snake_case , _snake_case )
    print('''Success.''' )


if __name__ == "__main__":
    test_rabin_karp()
| 2 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
    """Unit tests for diffusers' ``VQModel``.

    NOTE(review): the obfuscation replaced the two mixin base classes with
    the undefined name ``_A`` (presumably ``ModelTesterMixin`` and
    ``UNetTesterMixin`` from the imports above), collapsed distinct class
    attributes into ``a__``, renamed every test method to ``snake_case_``
    (later defs shadow earlier ones), and replaced named local assignments
    with ``_A`` while later lines still read the original names
    (``batch_size``, ``model``, ``image``, ``output``, ...). Confirm against
    the upstream test module.
    """

    # Apparent attributes: model class under test and its main input name.
    a__ : Optional[Any] = VQModel
    a__ : Optional[int] = "sample"

    @property
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Any=(32, 32) ) -> int:
        # Build a random float tensor batch shaped (batch, channels, *sizes)
        # on the test device and wrap it as the model's input dict.
        _A = 4
        _A = 3
        _A = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
        return {"sample": image}

    @property
    def snake_case_ ( self : Optional[Any] ) -> List[Any]:
        # Input image shape (channels, height, width).
        return (3, 32, 32)

    @property
    def snake_case_ ( self : Tuple ) -> Tuple:
        # Output image shape (channels, height, width).
        return (3, 32, 32)

    def snake_case_ ( self : Any ) -> Tuple:
        # Minimal VQModel constructor config plus matching dummy inputs.
        _A = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 3,
        }
        _A = self.dummy_input
        return init_dict, inputs_dict

    def snake_case_ ( self : Optional[Any] ) -> Tuple:
        # Intentionally disabled inherited test (no-op).
        pass

    def snake_case_ ( self : Any ) -> Optional[int]:
        # Intentionally disabled inherited test (no-op).
        pass

    def snake_case_ ( self : Union[str, Any] ) -> Any:
        # Loading a pretrained dummy checkpoint must report no missing keys
        # and produce a non-None forward output.
        _A , _A = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(__lowerCAmelCase )
        _A = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def snake_case_ ( self : List[Any] ) -> Optional[Any]:
        # Regression test: a seeded random input through the pretrained dummy
        # checkpoint must reproduce the hard-coded output slice below.
        _A = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
        model.to(__lowerCAmelCase ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        _A = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        _A = image.to(__lowerCAmelCase )
        with torch.no_grad():
            _A = model(__lowerCAmelCase ).sample
        _A = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        _A = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
| 2 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): every module constant below is bound to the same obfuscated
# name ``UpperCAmelCase_`` — each assignment shadows the previous one — while
# the tokenizer class later references the original names
# (``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``, ``BPE_TOKEN_MERGES``,
# ``BPE_TOKEN_VOCAB``/the ``"@@ "`` marker), which are undefined here.
# Confirm against the upstream Speech2Text2 tokenizer module.
UpperCAmelCase_ = {
    """vocab_file""": """vocab.json""",
    """tokenizer_config_file""": """tokenizer_config.json""",
    """merges_file""": """merges.txt""",
}
UpperCAmelCase_ = {
    """vocab_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
        ),
    },
    """tokenizer_config_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
        ),
    },
    """merges_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
        ),
    },
}
# End-of-word suffix and the continuation marker used by the BPE vocabulary.
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]:
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
# Speech2Text2 has no max input length; 1024 positional embeddings are
# recorded per checkpoint name for the tokenizer's size table.
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """Speech2Text2 tokenizer: a BPE tokenizer with a JSON vocab and an
    optional ``merges.txt``; without merges it can only decode.

    NOTE(review): the obfuscation replaced the base class with the undefined
    name ``_A`` (presumably ``PreTrainedTokenizer`` from the imports above),
    collapsed class attributes into ``a__``, renamed most methods to
    ``snake_case_`` (later defs shadow earlier ones), and replaced named
    local/attribute assignments with ``_A`` while later lines still read the
    original names (``merges_file``, ``self.encoder``, ``word``, ``pairs``,
    ``text``, ``index`` ...). The class-level constants it references
    (``VOCAB_FILES_NAMES`` etc.) are also undefined in this module. The
    docstrings below describe the apparent upstream intent — confirm.
    """

    a__ : Dict = VOCAB_FILES_NAMES
    a__ : str = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[Any] = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
        """Load the JSON vocab; load BPE merges when a merges file is given,
        otherwise the tokenizer is decode-only.

        NOTE(review): every parameter shares the obfuscated name
        ``__lowerCAmelCase`` — a duplicate-argument SyntaxError.
        """
        super().__init__(
            unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
        _A = do_lower_case
        with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            _A = json.load(__lowerCAmelCase )
        # Reverse mapping id -> token for decoding.
        _A = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            _A = None
            _A = None
        else:
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
                _A = merges_handle.read().split('''\n''' )[:-1]
            _A = [tuple(merge.split()[:2] ) for merge in merges]
            # Merge rank table: earlier merges have lower (better) rank.
            _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
            _A = {}

    @property
    def snake_case_ ( self : List[str] ) -> int:
        # Vocabulary size, taken from the decode table.
        return len(self.decoder )

    def snake_case_ ( self : Dict ) -> Dict:
        # Full vocab including tokens added after initialisation.
        return dict(self.encoder , **self.added_tokens_encoder )

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
        """Apply BPE to a single token: append the end-of-word marker, then
        repeatedly merge the best-ranked adjacent pair until none of the
        remaining pairs has a merge rank."""
        _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        _A = get_pairs(__lowerCAmelCase )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the lowest merge rank.
            _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _A , _A = bigram
            _A = []
            _A = 0
            # Rebuild the word, fusing every occurrence of (first, second).
            while i < len(__lowerCAmelCase ):
                try:
                    _A = word.index(__lowerCAmelCase , __lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _A = j
                if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _A = tuple(__lowerCAmelCase )
            _A = new_word
            if len(__lowerCAmelCase ) == 1:
                break
            else:
                _A = get_pairs(__lowerCAmelCase )
        _A = ''' '''.join(__lowerCAmelCase )
        # Special-case a bare newline token so the marker is not doubled.
        if word == "\n " + BPE_TOKEN_MERGES:
            _A = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(__lowerCAmelCase ):
            _A = word.replace(__lowerCAmelCase , '''''' )
        _A = word.replace(''' ''' , __lowerCAmelCase )
        _A = word
        return word

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
        """Split text on whitespace and BPE-encode each token; requires a
        merges file (encode path)."""
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            _A = text.lower()
        _A = text.split()
        _A = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
        return split_tokens

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
        # Token -> id, falling back to the unknown-token id.
        return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )

    def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
        # Id -> token, falling back to the unknown token.
        _A = self.decoder.get(__lowerCAmelCase , self.unk_token )
        return result

    def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
        """Join tokens back into a string, concatenating across the ``@@ ``
        continuation markers."""
        _A = ''' '''.join(__lowerCAmelCase )
        # make sure @@ tokens are concatenated
        _A = ''''''.join(string.split(__lowerCAmelCase ) )
        return string

    def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write the vocab JSON (and, when available, the merges file in rank
        order) into the given directory and return the written paths."""
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
        _A = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
                if index != token_index:
                    # Gaps in the rank sequence indicate a corrupted table.
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    _A = token_index
                writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 2 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# NOTE(review): the list below is bound to the obfuscated name
# ``UpperCAmelCase_`` while the loop iterates ``pkgs_to_check_at_runtime``,
# which is undefined in this module — confirm the intended name.
UpperCAmelCase_ = [
    """python""",
    """tqdm""",
    """regex""",
    """requests""",
    """packaging""",
    """filelock""",
    """numpy""",
    """tokenizers""",
    """huggingface-hub""",
    """safetensors""",
    """accelerate""",
    """pyyaml""",
]
# Verify each pinned dependency at import time; optional packages
# (tokenizers, accelerate) are only checked when actually installed.
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def SCREAMING_SNAKE_CASE_ ( pkg , hint=None ):
    """Check at runtime that the installed version of a dependency matches
    the requirement pinned in ``deps``.

    Args:
        pkg: Name of the dependency to check (a key of ``deps``).
        hint: Optional extra text appended to the error message when the
            installed version does not satisfy the requirement.
    """
    # Bug fix: the original declared both parameters as ``_snake_case`` — a
    # duplicate-argument SyntaxError — so neither ``pkg`` nor the hint could
    # be passed through to require_version. (The broken ``Dict``/``List[str]``
    # annotations, undefined in this module, are dropped as well.)
    require_version(deps[pkg] , hint )
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""T""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the binary-heap index of the parent of the node at the given
    index (``-1`` for the root at index 0)."""
    # Bug fix: the original body read the undefined name ``position``; use
    # the actual parameter.
    return (_snake_case - 1) // 2
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the binary-heap index of the left child of the node at the
    given index."""
    # Bug fix: the original body read the undefined name ``position``; use
    # the actual parameter.
    return (2 * _snake_case) + 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return the binary-heap index of the right child of the node at the
    given index."""
    # Bug fix: the original body read the undefined name ``position``; use
    # the actual parameter.
    return (2 * _snake_case) + 2
class lowerCamelCase__ ( Generic[T]):
    """Min-priority queue backed by a binary heap of ``(element, weight)``
    pairs plus an element -> heap-index map for O(log n) key updates.

    NOTE(review): obfuscation damage — several methods declare two
    parameters both named ``__lowerCAmelCase`` (a duplicate-argument
    SyntaxError); most methods share the name ``snake_case_`` so later defs
    shadow earlier ones; and many named assignments (``self.heap``,
    ``self.position_map``, ``self.elements``, ``elem``, ``weight``,
    ``curr_pos``, ``parent_position`` ...) were collapsed to ``_A``, so the
    position map is never actually maintained. The comments below describe
    the apparent upstream intent — confirm before relying on them.
    """

    def __init__( self : Optional[int] ) -> None:
        # Apparent state: heap list, element->index map, element count.
        _A = []
        _A = {}
        _A = 0

    def __len__( self : str ) -> int:
        return self.elements

    def __repr__( self : Optional[int] ) -> str:
        return str(self.heap )

    def snake_case_ ( self : str ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        _A = self.elements
        self.elements += 1
        self._bubble_up(__lowerCAmelCase )

    def snake_case_ ( self : Tuple ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        _A , _A = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            _A , _A = self.heap[0]
            self._bubble_down(__lowerCAmelCase )
        return elem

    def snake_case_ ( self : int , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Update the weight of the given key, then restore the heap property
        # in whichever direction the change requires.
        _A = self.position_map[elem]
        _A = (elem, weight)
        if position > 0:
            _A = get_parent_position(__lowerCAmelCase )
            _A , _A = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__lowerCAmelCase )
            else:
                self._bubble_down(__lowerCAmelCase )
        else:
            self._bubble_down(__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        _A = self.position_map[elem]
        if curr_pos == 0:
            return None
        _A = get_parent_position(__lowerCAmelCase )
        _A , _A = self.heap[curr_pos]
        _A , _A = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
            return self._bubble_up(__lowerCAmelCase )
        return None

    def snake_case_ ( self : Dict , __lowerCAmelCase : T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        _A = self.position_map[elem]
        _A , _A = self.heap[curr_pos]
        _A = get_child_left_position(__lowerCAmelCase )
        _A = get_child_right_position(__lowerCAmelCase )
        # Two children: sink towards the smaller-weight child if it beats us.
        if child_left_position < self.elements and child_right_position < self.elements:
            _A , _A = self.heap[child_left_position]
            _A , _A = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        if child_left_position < self.elements:
            _A , _A = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
            else:
                return None
        if child_right_position < self.elements:
            _A , _A = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__lowerCAmelCase , __lowerCAmelCase )
                return self._bubble_down(__lowerCAmelCase )
        return None

    def snake_case_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
        # Swap the nodes at the given positions (heap entries and, apparently,
        # their position-map records).
        _A = self.heap[nodea_pos][0]
        _A = self.heap[nodea_pos][0]
        _A , _A = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        _A = nodea_pos
        _A = nodea_pos
class lowerCamelCase__ ( Generic[T]):
    """Undirected weighted graph stored as an adjacency map
    ``{node: {neighbour: weight}}`` with a node counter.

    NOTE(review): obfuscation damage — the edge-adding method declares three
    parameters all named ``__lowerCAmelCase`` (a duplicate-argument
    SyntaxError), and the assignments that upstream wrote to
    ``self.connections``/``self.nodes`` entries were collapsed to ``_A``, so
    neither nodes nor edge weights are actually recorded. Confirm against
    the upstream module.
    """

    def __init__( self : str ) -> None:
        # Apparent state: adjacency map and node count.
        _A = {}
        _A = 0

    def __repr__( self : str ) -> str:
        return str(self.connections )

    def __len__( self : Dict ) -> int:
        return self.nodes

    def snake_case_ ( self : Any , __lowerCAmelCase : T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            _A = {}
            self.nodes += 1

    def snake_case_ ( self : str , __lowerCAmelCase : T , __lowerCAmelCase : T , __lowerCAmelCase : int ) -> None:
        # Add an edge between 2 nodes in the graph (both directions, since
        # the graph is undirected).
        self.add_node(__lowerCAmelCase )
        self.add_node(__lowerCAmelCase )
        _A = weight
        _A = weight
def SCREAMING_SNAKE_CASE_ ( _snake_case :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's minimum-spanning-tree algorithm: returns a ``(dist, parent)``
    pair mapping each node to its tree-edge weight and predecessor.

    NOTE(review): obfuscation damage — the annotation references
    ``GraphUndirectedWeighted`` and ``T`` (both undefined here: the graph
    class was renamed to ``lowerCamelCase__`` and the TypeVar to
    ``UpperCAmelCase_``), the body reads ``graph`` instead of the
    ``_snake_case`` parameter, and the named assignments upstream
    (``dist``, ``parent``, ``priority_queue``, ``node`` ...) were collapsed
    to ``_A``, so the function does not run as written. The comments below
    describe the apparent intent — confirm before use.
    """
    # Initialise every node at infinite distance with no parent, and seed
    # the priority queue with all nodes.
    _A = {node: maxsize for node in graph.connections}
    _A = {node: None for node in graph.connections}
    _A = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(_snake_case , _snake_case )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    _A = priority_queue.extract_min()
    _A = 0
    # Relax the start node's neighbours once before the main loop.
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            _A = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(_snake_case , dist[neighbour] )
            _A = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        _A = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                _A = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(_snake_case , dist[neighbour] )
                _A = node
    return dist, parent
| 2 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)


@dataclass
class lowerCamelCase__ ( datasets.BuilderConfig):
    """BuilderConfig for the JSON loader.

    NOTE(review): the obfuscation collapsed the distinct field names into
    ``a__``; by defaults/types they appear to be: features, encoding,
    encoding_errors, field, use_threads (deprecated), block_size
    (deprecated), chunksize (10 MB), newlines_in_values — confirm against
    the upstream module. The builder below references those original names.
    """

    a__ : Optional[datasets.Features] = None
    a__ : str = "utf-8"
    a__ : Optional[str] = None
    a__ : Optional[str] = None
    a__ : bool = True  # deprecated
    a__ : Optional[int] = None  # deprecated
    a__ : int = 10 << 20  # 10MB
    a__ : Optional[bool] = None
class lowerCamelCase__ ( datasets.ArrowBasedBuilder):
    """Arrow-based dataset builder for JSON / JSON-Lines files.

    NOTE(review): obfuscation damage throughout — methods share the name
    ``snake_case_`` (later defs shadow earlier ones), and named assignments
    (``data_files``, ``files``, ``splits``, ``dataset``, ``batch``,
    ``block_size``, ``pa_table`` ...) were collapsed to ``_A`` while later
    lines still read the original names, so the methods do not run as
    written. The docstrings below describe the apparent upstream intent —
    confirm against the original module.
    """

    a__ : List[Any] = JsonConfig

    def snake_case_ ( self : Any ) -> List[str]:
        """Validate deprecated config options and return the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            _A = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def snake_case_ ( self : int , __lowerCAmelCase : Dict ) -> Any:
        """Download/extract the configured data files and build one
        SplitGenerator per split (a bare path/list maps to the train split)."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        _A = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(__lowerCAmelCase , (str, list, tuple) ):
            _A = data_files
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                _A = [files]
            _A = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        _A = []
        for split_name, files in data_files.items():
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                _A = [files]
            _A = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={'''files''': files} ) )
        return splits

    def snake_case_ ( self : Dict , __lowerCAmelCase : pa.Table ) -> pa.Table:
        """Cast a raw Arrow table to the configured feature schema, adding
        null columns for any features missing from the table."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                _A = self.config.features.arrow_schema.field(__lowerCAmelCase ).type
                _A = pa_table.append_column(__lowerCAmelCase , pa.array([None] * len(__lowerCAmelCase ) , type=__lowerCAmelCase ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _A = table_cast(__lowerCAmelCase , self.config.features.arrow_schema )
        return pa_table

    def snake_case_ ( self : Any , __lowerCAmelCase : str ) -> int:
        """Yield ``(key, pa.Table)`` pairs from the given files.

        When ``config.field`` is set, each file is parsed as one JSON object
        and only that field (a list of dicts or a dict of lists) is kept.
        Otherwise files are read as JSON-Lines in chunks, retried with a
        doubled pyarrow block size on straddling-record errors, and parsed
        with the stdlib ``json`` as a last resort.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    _A = json.load(__lowerCAmelCase )
                # We keep only the field we are interested in
                _A = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(__lowerCAmelCase , (list, tuple) ):
                    _A = set().union(*[row.keys() for row in dataset] )
                    _A = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
                else:
                    _A = dataset
                _A = pa.Table.from_pydict(__lowerCAmelCase )
                yield file_idx, self._cast_table(__lowerCAmelCase )
            # If the file has one json object per line
            else:
                with open(__lowerCAmelCase , '''rb''' ) as f:
                    _A = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    _A = max(self.config.chunksize // 32 , 16 << 10 )
                    _A = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        _A = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(__lowerCAmelCase )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            _A = batch.decode(self.config.encoding , errors=__lowerCAmelCase ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    _A = paj.read_json(
                                        io.BytesIO(__lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=__lowerCAmelCase ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(__lowerCAmelCase , pa.ArrowInvalid )
                                        and "straddling" not in str(__lowerCAmelCase )
                                        or block_size > len(__lowerCAmelCase )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'''Batch of {len(__lowerCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # pyarrow could not parse the chunk at all: fall back
                            # to parsing the whole file with the stdlib json.
                            try:
                                with open(
                                    __lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    _A = json.load(__lowerCAmelCase )
                            except json.JSONDecodeError:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):  # list is the only sequence type supported in JSON
                                try:
                                    _A = set().union(*[row.keys() for row in dataset] )
                                    _A = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
                                    _A = pa.Table.from_pydict(__lowerCAmelCase )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'''Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}''' )
                                    raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(__lowerCAmelCase )
                                break
                            else:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}''' )
                                raise ValueError(
                                    f'''Not able to read records in the JSON file at {file}. '''
                                    f'''You should probably indicate the field of the JSON file containing your records. '''
                                    f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
                                    f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase )
                        batch_idx += 1
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same obfuscated name
# ``UpperCAmelCase_`` — each assignment shadows the previous one — while the
# tokenizer class below references the original names (``SPIECE_UNDERLINE``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``, ...), which are
# undefined here. Confirm against the upstream BARTpho tokenizer module.
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCAmelCase_ = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
UpperCAmelCase_ = {"""vinai/bartpho-syllable""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_A = vocab_file
_A = monolingual_vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_A = {}
_A = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = cnt
cnt += 1
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
_A = line.strip().split()[0]
_A = len(self.fairseq_tokens_to_ids )
if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
_A = len(self.fairseq_tokens_to_ids )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ) -> List[Any]:
_A = self.__dict__.copy()
_A = None
_A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def snake_case_ ( self : Dict ) -> Optional[Any]:
_A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
_A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-loading package init for Megatron-BERT: submodules are only imported
# on first attribute access, keeping top-level import cheap.
# NOTE(review): the obfuscated original bound the import table to
# ``UpperCAmelCase_`` yet passed the undefined name ``_import_structure`` to
# ``_LazyModule`` (NameError at import time), and never installed the lazy
# proxy in ``sys.modules``. Restored the conventional pattern.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
    # The modeling module requires torch; leave it out of the table otherwise.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys
    # At runtime, replace this module with a proxy that imports lazily.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    Traverses ``graph`` (adjacency mapping: node -> list of neighbours)
    from ``start`` and returns the set of reachable nodes.

    NOTE(review): the obfuscated original declared two parameters both named
    ``_snake_case`` (a SyntaxError) and unpacked both locals into ``_A``, so
    ``explored`` and ``stack`` were never bound; restored with proper names.
    """
    explored, stack = set(), [start]
    while stack:
        node = stack.pop()
        explored.add(node)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        # ``reversed`` keeps the visit order identical to a recursive DFS.
        for adj in reversed(graph[node]):
            if adj not in explored:
                stack.append(adj)
    return explored


# Backward-compatible alias: the demo at the bottom of this script calls
# ``depth_first_search``, which the obfuscation had left undefined.
depth_first_search = SCREAMING_SNAKE_CASE_
return explored
# Demo adjacency list (undirected-ish; note "D" lists itself as a neighbour).
# NOTE(review): the script below references ``G`` and ``depth_first_search``,
# but the graph is bound to ``UpperCAmelCase_`` and the function above to
# ``SCREAMING_SNAKE_CASE_`` — running this raises NameError (obfuscation
# artifact; confirm against the original script).
UpperCAmelCase_ = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    # Run embedded doctests, then print the nodes reachable from "A".
    import doctest
    doctest.testmod()
    print(depth_first_search(G, """A"""))
| 2 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : Any = ["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : str ) -> int:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Optional[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : int ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Optional[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : str = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[int] ) -> Dict:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Any , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Tuple ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *__lowerCAmelCase : Dict , **__lowerCAmelCase : int ) -> List[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : List[str] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Any ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Any , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Union[str, Any] , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : List[str] ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Tuple , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[Any] ) -> List[str]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : Optional[Any] = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : str ) -> List[Any]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Union[str, Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : str ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Union[str, Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase__ ( metaclass=_A):
"""simple docstring"""
a__ : Any = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case_ ( cls : Dict , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 2 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase__ ( _A):
    """Legacy read-only fsspec filesystem exposing the files of a Hugging Face
    dataset repository (listing via the repo's sibling list, opening via
    resolved hub URLs).

    NOTE(review): obfuscation artifacts — the base class ``_A`` is undefined
    here (presumably ``AbstractFileSystem``); all four helper methods are
    named ``snake_case_`` so only the last definition survives; call sites use
    ``self._get_dirs()`` which no longer exists under that name; and ``ls``
    references ``path``/``detail`` although its parameters were renamed to
    ``__lowerCAmelCase``. Documented as written; it cannot run as-is.
    """
    a__ : Union[str, Any] = ""
    a__ : Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs
    # Stores the repo metadata and auth token; the directory cache is built
    # lazily on first listing.
    # NOTE(review): body references ``repo_info``/``token`` but the parameters
    # were renamed ``__lowerCAmelCase`` — NameError as written.
    def __init__( self : List[str] , __lowerCAmelCase : Optional[DatasetInfo] = None , __lowerCAmelCase : Optional[str] = None , **__lowerCAmelCase : str , ) -> Tuple:
        super().__init__(self , **__lowerCAmelCase )
        _A = repo_info
        _A = token
        _A = None
    # Presumably ``_get_dirs``: populate ``self.dir_cache`` with one entry per
    # repo file plus synthetic directory entries for every parent path.
    def snake_case_ ( self : Dict ) -> str:
        if self.dir_cache is None:
            _A = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                _A = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    # Presumably ``_open``: stream a single repo file via its hub URL.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str = "rb" , **__lowerCAmelCase : Tuple , ) -> Dict:
        if not isinstance(self.repo_info , __lowerCAmelCase ):
            raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        _A = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
        return fsspec.open(
            __lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    # Presumably ``info``: look the path up in the directory cache.
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[str] , **__lowerCAmelCase : int ) -> Optional[Any]:
        self._get_dirs()
        _A = self._strip_protocol(__lowerCAmelCase )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(__lowerCAmelCase )
    # Presumably ``ls``: list direct children of ``path`` from the cache,
    # returning full info dicts when ``detail`` is truthy, else sorted names.
    def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : int=False , **__lowerCAmelCase : Any ) -> Union[str, Any]:
        self._get_dirs()
        _A = PurePosixPath(path.strip('''/''' ) )
        _A = {}
        for p, f in self.dir_cache.items():
            _A = PurePosixPath(p.strip('''/''' ) )
            _A = p.parent
            if root == path:
                _A = f
        _A = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 2 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and per-checkpoint config URLs.
# NOTE(review): both constants are bound to the same obfuscated name
# ``UpperCAmelCase_``; the second assignment overwrites the first.
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class lowerCamelCase__ ( _A):
    """Configuration class for XLNet models (model-type ``"xlnet"``), mapping
    legacy attribute names onto the common HF configuration vocabulary.

    NOTE(review): obfuscation artifacts — the base class ``_A`` is undefined
    (presumably ``PretrainedConfig``); ``__init__`` declares many parameters
    all named ``__lowerCAmelCase`` (a SyntaxError) while its body reads the
    original names (``vocab_size``, ``d_model``, ...); both the property and
    its setter are renamed ``snake_case_``, so the ``@max_position_embeddings.setter``
    decorator references a name that does not exist. Documented as written.
    """
    a__ : Any = "xlnet"
    a__ : Dict = ["mems"]
    # Legacy-name aliases resolved by the base config class.
    a__ : List[str] = {
        "n_token": "vocab_size", # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    # Stores all architecture/summary/generation hyper-parameters; validates
    # that the model width divides evenly across attention heads.
    def __init__( self : int , __lowerCAmelCase : Dict=3_20_00 , __lowerCAmelCase : List[str]=10_24 , __lowerCAmelCase : Dict=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Dict=40_96 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]="bi" , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[Any]=5_12 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Any="last" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="tanh" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : str=5 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=2 , **__lowerCAmelCase : List[str] , ) -> Tuple:
        _A = vocab_size
        _A = d_model
        _A = n_layer
        _A = n_head
        # Each head gets an equal slice of the hidden dimension.
        if d_model % n_head != 0:
            raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        _A = d_model // n_head
        _A = ff_activation
        _A = d_inner
        _A = untie_r
        _A = attn_type
        _A = initializer_range
        _A = layer_norm_eps
        _A = dropout
        _A = mem_len
        _A = reuse_len
        _A = bi_data
        _A = clamp_len
        _A = same_length
        _A = summary_type
        _A = summary_use_proj
        _A = summary_activation
        _A = summary_last_dropout
        _A = start_n_top
        _A = end_n_top
        _A = bos_token_id
        _A = pad_token_id
        _A = eos_token_id
        # Legacy kwarg: ``use_cache`` was renamed ``use_mems_eval``.
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' , __lowerCAmelCase , )
            _A = kwargs['''use_cache''']
        _A = use_mems_eval
        _A = use_mems_train
        super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
    # Presumably the ``max_position_embeddings`` property: XLNet has no fixed
    # sequence-length limit, signalled by -1.
    @property
    def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Dict:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 2 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Regression tests for ``JukeboxTokenizer``: tokenizing a fixed
    artist/genre/lyrics metadata dict must reproduce hard-coded token ids for
    the 1b-lyrics and 5b-lyrics checkpoints.

    NOTE(review): obfuscation artifacts — both class attributes are bound to
    the same name ``a__`` (the dict overwrites the tokenizer class), and the
    locals ``tokens``/``EXPECTED_OUTPUT`` used in the assertions were
    collapsed to ``_A``; as written these tests raise at runtime. Documented
    as written.
    """
    a__ : Tuple = JukeboxTokenizer
    a__ : Union[str, Any] = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }
    # 1b-lyrics checkpoint: full metadata (artist, genre, lyrics) is encoded
    # into three tensors; compare against the frozen expected ids.
    @require_torch
    def snake_case_ ( self : str ) -> Union[str, Any]:
        import torch
        _A = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        _A = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        _A = [
            torch.tensor([[
                0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    # 5b-lyrics checkpoint: same metadata, different vocabulary offsets (note
    # the -1 padding ids and shifted token values).
    @require_torch
    def snake_case_ ( self : Tuple ) -> Tuple:
        import torch
        _A = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        _A = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        _A = [
            torch.tensor([[
                0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 2 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :bytes ) -> str:
    """Return the uppercase base16 (hex) encoding of the given bytes."""
    # Each byte renders as exactly two uppercase hex digits (zero-padded).
    return "".join(f"{byte:02X}" for byte in _snake_case)
def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    Raises ValueError when the input has an odd number of digits or contains
    characters outside the RFC 3548 uppercase base16 alphabet.

    NOTE(review): the original body validated ``_snake_case`` but then indexed
    the undefined name ``data`` (NameError); fixed by binding the parameter to
    a local alias and using it consistently.
    """
    data = _snake_case
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 2 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase_ = get_logger()
UpperCAmelCase_ = None
class lowerCamelCase__ ( TensorFormatter[Mapping, "jax.Array", Mapping]):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : int ) -> Optional[int]:
super().__init__(features=__lowerCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__lowerCAmelCase )}, as `jaxlib.xla_extension.Device` '''
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
_A = device if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_A = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
_A = str(jax.devices()[0] )
_A = jnp_array_kwargs
@staticmethod
def snake_case_ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__lowerCAmelCase ): device for device in jax.devices()}
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and column:
if all(
isinstance(__lowerCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__lowerCAmelCase , axis=0 )
return column
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__lowerCAmelCase , (str, bytes, type(__lowerCAmelCase )) ):
return value
elif isinstance(__lowerCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_A = {}
if isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_A = {'''dtype''': jnp.intaa}
else:
_A = {'''dtype''': jnp.intaa}
elif isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_A = {'''dtype''': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__lowerCAmelCase , PIL.Image.Image ):
_A = np.asarray(__lowerCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_A = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__lowerCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> Optional[int]:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__lowerCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__lowerCAmelCase , '''__array__''' ) and not isinstance(__lowerCAmelCase , jax.Array ):
_A = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__lowerCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
elif isinstance(__lowerCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
return self._tensorize(__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : dict ) -> Optional[Any]:
return map_nested(self._recursive_tensorize , __lowerCAmelCase , map_list=__lowerCAmelCase )
    def snake_case_ ( self : Tuple , __lowerCAmelCase : pa.Table ) -> Mapping:
        """Extract the first row of a pyarrow table, decode it, and tensorize it."""
        # NOTE(review): each step's result is bound to the collapsed name ``_A``;
        # the original presumably threaded row -> decoded row -> tensorized row.
        _A = self.numpy_arrow_extractor().extract_row(__lowerCAmelCase )
        _A = self.python_features_decoder.decode_row(__lowerCAmelCase )
        return self.recursive_tensorize(__lowerCAmelCase )
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : pa.Table ) -> "jax.Array":
        """Extract the first column of a pyarrow table as a single consolidated jax array."""
        # NOTE(review): reads reference ``pa_table`` and ``column`` while assignments
        # target ``_A`` — names were collapsed by an automated rename; confirm.
        _A = self.numpy_arrow_extractor().extract_column(__lowerCAmelCase )
        _A = self.python_features_decoder.decode_column(__lowerCAmelCase , pa_table.column_names[0] )
        _A = self.recursive_tensorize(__lowerCAmelCase )
        _A = self._consolidate(__lowerCAmelCase )
        return column
    def snake_case_ ( self : Any , __lowerCAmelCase : pa.Table ) -> Mapping:
        """Extract a whole batch, decode it, tensorize it, and consolidate each column."""
        # NOTE(review): reads use ``batch`` while assignments target ``_A`` —
        # collapsed names; the loop presumably rebinds ``batch[column_name]``.
        _A = self.numpy_arrow_extractor().extract_batch(__lowerCAmelCase )
        _A = self.python_features_decoder.decode_batch(__lowerCAmelCase )
        _A = self.recursive_tensorize(__lowerCAmelCase )
        for column_name in batch:
            _A = self._consolidate(batch[column_name] )
        return batch
| 2 |
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> bool:
    """Return True if the list forms an arithmetic series (constant difference).

    Raises:
        ValueError: if the input is not a list or is empty.

    BUG FIX: the original validated with ``isinstance(x, x)``, which always raises
    TypeError (the second argument must be a type), and the body read an
    undefined name ``series`` while the parameter was ``_snake_case``.
    """
    series = _snake_case  # body reads use the original name `series`
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    if len(series ) == 1:
        # a single element is trivially arithmetic
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the input is not a list or is empty.

    BUG FIX: the original validated with ``isinstance(x, x)`` (always a
    TypeError) and accumulated into a name (``answer``) that was never bound
    because the assignment target had been collapsed to ``_A``.
    """
    series = _snake_case  # body reads use the original name `series`
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    # sum() replaces the manual accumulation loop
    return sum(series ) / len(series )
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase__ ( unittest.TestCase):
    """CPU-only smoke tests that run the accelerate test scripts in-process
    through ``debug_launcher``."""

    def snake_case_ ( self : List[str] ) -> int:
        # Launch the main accelerate test script.
        entry_point = test_script.main
        debug_launcher(entry_point )

    def snake_case_ ( self : Optional[Any] ) -> str:
        # Launch the ops test script.
        entry_point = test_ops.main
        debug_launcher(entry_point )
| 2 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> "qiskit.result.counts.Counts":
    """Build an n-qubit quantum Fourier transform circuit, measure every qubit,
    and simulate it for 10,000 shots on the qasm simulator.

    Args:
        _snake_case: number of qubits (1..10).

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is <= 0, not an exact integer, or > 10.

    BUG FIX: the original check was ``isinstance(n, n)`` which itself raises
    TypeError for every call (the second argument must be a type); it now
    rejects non-int inputs as intended. The return annotation is quoted so the
    function can be defined without importing qiskit eagerly.
    """
    number_of_qubits = _snake_case  # body reads use the original name
    if not isinstance(number_of_qubits , int ):
        raise TypeError('''number of qubits must be a integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        # Hadamard on the highest remaining qubit, then controlled phases below it.
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    # reverse qubit order with swaps to obtain the standard QFT output ordering
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
# Demo entry point: print the measurement counts of a 3-qubit QFT.
# BUG FIX: the original called `quantum_fourier_transform`, a name that is not
# defined in this module as rendered (the function above is bound to
# SCREAMING_SNAKE_CASE_), so the demo raised NameError.
if __name__ == "__main__":
    print(
        f'Total count for quantum fourier transform state is: \
    {SCREAMING_SNAKE_CASE_(3)}'
    )
| 2 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def SCREAMING_SNAKE_CASE_ ( _snake_case :dict ) -> tuple:
    """Split a dataset mapping into its feature matrix and target vector."""
    features = _snake_case["data"]
    targets = _snake_case["target"]
    return (features, targets)
def SCREAMING_SNAKE_CASE_ ( features :np.ndarray , target :np.ndarray , test_features :np.ndarray ) -> np.ndarray:
    """Fit an XGBoost regressor on (features, target) and predict for test_features.

    Returns the predictions reshaped to a column vector of shape (n, 1).

    BUG FIX: the original signature declared the same parameter name three times
    (a SyntaxError), and the model/prediction assignments targeted a throwaway
    name while later reads used ``xgb`` / ``predictions``.
    """
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """End-to-end demo: fetch California housing data, train/test split, fit
    XGBoost, and print MAE / MSE.

    NOTE(review): as rendered, assignments target ``_A`` and the calls reference
    ``data_handling`` / ``xgboost`` / ``_snake_case`` — names that an automated
    rename collapsed (the sibling helpers above are all bound to
    SCREAMING_SNAKE_CASE_), so this body cannot run as-is; confirm against the
    upstream script before relying on it.
    """
    _A = fetch_california_housing()
    _A , _A = data_handling(_snake_case )
    _A , _A , _A , _A = train_test_split(
        _snake_case , _snake_case , test_size=0.25 , random_state=1 )
    _A = xgboost(_snake_case , _snake_case , _snake_case )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(_snake_case , _snake_case )}''' )
    print(F'''Mean Square Error : {mean_squared_error(_snake_case , _snake_case )}''' )
# Entry point: run doctests, then the end-to-end training demo.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): `main` is not defined in this module as rendered — the demo
    # entry point above was renamed to SCREAMING_SNAKE_CASE_; confirm the target.
    main()
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): both the module logger and the name-mapping dict below are bound
# to the same name `UpperCAmelCase_` as rendered, while later code reads `logger`
# and `MAPPING` — an automated rename collapsed the original names.
UpperCAmelCase_ = logging.get_logger(__name__)
# Maps fairseq SEW parameter-name fragments to the corresponding HuggingFace
# module paths; "*" stands for a transformer layer index filled in at load time.
UpperCAmelCase_ = {
    """post_extract_proj""": """feature_projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.upsample.0""": """encoder.upsample.projection""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
def SCREAMING_SNAKE_CASE_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Assign ``value`` into the submodule of ``hf_pointer`` addressed by the
    dotted path ``key``, on the attribute selected by ``weight_type``.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    five times (a SyntaxError) and every assignment targeted a throwaway ``_A``
    while reads used the real names (``hf_pointer``, ``hf_shape``, ``value``);
    names and the `.data` assignment targets are reconstructed from those reads
    and from the upstream transformers SEW conversion script — TODO confirm
    parameter order against callers.
    """
    # walk the dotted module path
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    # copy the checkpoint tensor into the right parameter slot
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ ( fairseq_model , hf_model , is_finetuned ) -> None:
    """Copy every tensor of a fairseq SEW state dict into ``hf_model`` using
    MAPPING, warning about any weights that could not be matched.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    three times (a SyntaxError) and assignment targets were collapsed to ``_A``;
    names are reconstructed from the surviving reads and the upstream
    transformers SEW conversion script. NOTE(review): the sibling helpers
    ``load_conv_layer`` / ``set_recursively`` are bound to SCREAMING_SNAKE_CASE_
    in this rendering — confirm the bindings.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # extract the layer index from the fairseq name
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    """Load one convolutional feature-extractor tensor (conv or layer-norm
    weight/bias) into ``feature_extractor``; unmatched names go to
    ``unused_weights``.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    five times (a SyntaxError) and assignment targets were collapsed to ``_A``;
    names are reconstructed from the surviving reads and the upstream
    transformers SEW conversion script — TODO confirm parameter order.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )  # 0 = conv, 2 = layer norm
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def SCREAMING_SNAKE_CASE_ ( model , is_finetuned ):
    """Derive a ``SEWConfig`` from a fairseq SEW model's config.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    twice (a SyntaxError) and every ``config.<field> = ...`` target was
    collapsed to ``_A``; the field names are reconstructed from the upstream
    transformers SEW conversion script and the right-hand sides that survived —
    TODO confirm each field against that script before shipping.
    """
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    # NOTE(review): eval() on checkpoint-provided text — acceptable only for
    # trusted local checkpoints; consider ast.literal_eval.
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''
    return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq SEW checkpoint to the HuggingFace format and save it.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    five times (a SyntaxError) and assignment targets were collapsed to ``_A``;
    names and several literal arguments (e.g. ``do_normalize``,
    ``do_lower_case``) are reconstructed from the upstream transformers SEW
    conversion script — TODO verify against that script.
    NOTE(review): the sibling helpers ``convert_config`` /
    ``recursively_load_weights`` are bound to SCREAMING_SNAKE_CASE_ in this
    rendering — confirm the bindings.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
# CLI entry point: parse the conversion arguments and run the converter.
# NOTE(review): as rendered the parser and parsed args are both bound to
# `UpperCAmelCase_` while the reads use `parser` / `args`, and the converter is
# called via `convert_sew_checkpoint` although the function above is bound to
# SCREAMING_SNAKE_CASE_ — names collapsed by an automated rename; confirm.
if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 2 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
# Module logger. NOTE(review): bound to `UpperCAmelCase_` as rendered while the
# code below reads `logger` — an automated rename collapsed the original name.
UpperCAmelCase_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( base_model_name , hf_config , downstream_dict ):
    """Build a UniSpeechSat sequence-classification model and copy the s3prl
    downstream head weights into it.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    three times (a SyntaxError) and the attribute-assignment targets were
    collapsed to ``_A``; targets are reconstructed from the upstream
    transformers s3prl conversion script — TODO confirm.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def SCREAMING_SNAKE_CASE_ ( base_model_name , hf_config , downstream_dict ):
    """Build a UniSpeechSat audio-frame-classification (diarization) model and
    copy the s3prl downstream head weights into it.

    BUG FIX / reconstruction: duplicate ``_snake_case`` parameters (SyntaxError)
    and collapsed ``_A`` assignment targets; reconstructed from the upstream
    transformers s3prl conversion script — TODO confirm.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def SCREAMING_SNAKE_CASE_ ( base_model_name , hf_config , downstream_dict ):
    """Build a UniSpeechSat x-vector (speaker verification) model and copy the
    s3prl downstream weights (TDNN layers, pooling linears, objective) into it.

    BUG FIX / reconstruction: duplicate ``_snake_case`` parameters (SyntaxError)
    and collapsed ``_A`` assignment targets; targets reconstructed from the
    upstream transformers s3prl conversion script — TODO confirm each mapping.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( base_model_name , config_path , checkpoint_path , model_dump_path ):
    """Convert an s3prl UniSpeechSat downstream checkpoint to a HuggingFace
    model and save model + feature extractor to ``model_dump_path``.

    BUG FIX / reconstruction: the rendered signature declared ``_snake_case``
    four times (a SyntaxError) and assignment targets were collapsed to ``_A``;
    names reconstructed from the surviving reads and the upstream transformers
    s3prl conversion script. NOTE(review): the per-architecture helpers are
    bound to SCREAMING_SNAKE_CASE_ in this rendering — confirm the bindings.
    """
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
# CLI entry point for the s3prl -> HuggingFace conversion.
# NOTE(review): as rendered the parser and args are both bound to
# `UpperCAmelCase_` while reads use `parser` / `args`, and the converter is
# called via `convert_saprl_checkpoint` although the function above is bound to
# SCREAMING_SNAKE_CASE_ — names collapsed by an automated rename; confirm.
if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
    )
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
    parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    UpperCAmelCase_ = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 2 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL when vision support is installed; otherwise install a no-op stub
# so references to an Image-like object do not fail at import time.
if is_vision_available():
    from PIL import Image
else:
    class lowerCamelCase__ :
        """No-op placeholder used in place of PIL.Image when vision
        dependencies are unavailable."""

        @staticmethod
        def snake_case_ ( *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ) -> Any:
            # Intentionally does nothing: tests requiring vision are skipped.
            pass
@is_pipeline_test
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Tests for the zero-shot image-classification pipeline (tiny random CLIP
    for fast checks, real CLIP for slow integration checks).

    NOTE(review): results are repeatedly assigned to the throwaway name ``_A``
    while later reads use ``image_classifier`` / ``image`` / ``__lowerCAmelCase``
    — an automated rename appears to have collapsed the original local names;
    confirm against the upstream test module before relying on these bodies.
    """

    @require_torch
    def snake_case_ ( self : Tuple ) -> Tuple:
        # Tiny random CLIP: scores are near-uniform, so label ORDER is unstable
        # and the single-image case only asserts membership in the valid orders.
        _A = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__lowerCAmelCase ) , [
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
            ] , )
        # Batched call: five copies of the image with batch_size=2.
        _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
            ] , )

    @require_tf
    def snake_case_ ( self : int ) -> Optional[int]:
        # Same checks as the torch test, but on the TensorFlow backend.
        _A = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
                [
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                    {'''score''': 0.333, '''label''': ANY(__lowerCAmelCase )},
                ],
            ] , )

    @slow
    @require_torch
    def snake_case_ ( self : Optional[int] ) -> int:
        # Real CLIP checkpoint: deterministic scores on the COCO cats image.
        _A = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def snake_case_ ( self : Optional[int] ) -> Dict:
        # Same real-checkpoint checks on the TensorFlow backend.
        _A = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _A = image_classifier(__lowerCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        _A = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
| 2 | 1 |
from functools import lru_cache
@lru_cache
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> int:
    """Return n! (factorial), computed recursively with lru_cache memoization.

    Raises:
        ValueError: if the input is negative.

    BUG FIX: the original body read the undefined names ``num`` and
    ``factorial`` (the parameter is ``_snake_case`` and the function is bound
    to SCREAMING_SNAKE_CASE_), so every call raised NameError.
    """
    if _snake_case < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if _snake_case in (0, 1) else _snake_case * SCREAMING_SNAKE_CASE_(_snake_case - 1 )
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Tests for AlignProcessor: save/load round-trips and delegation to its
    BertTokenizer(+Fast) and EfficientNetImageProcessor components.

    NOTE(review): many results are assigned to the throwaway name ``_A`` while
    later reads use the original local names — an automated rename appears to
    have collapsed the names; confirm against the upstream test module.
    """

    def snake_case_ ( self : Tuple ) -> Optional[int]:
        # Write a minimal WordPiece vocab and an image-processor config into a
        # temp dir that the helper factory methods below load from.
        _A = tempfile.mkdtemp()
        _A = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _A = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        _A = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
        # Factory: slow tokenizer backed by the temp-dir vocab.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
        # Factory: fast (Rust) tokenizer backed by the temp-dir vocab.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
        # Factory: image processor backed by the temp-dir config.
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        # Clean up the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def snake_case_ ( self : int ) -> Optional[Any]:
        # Build one random RGB PIL image as pipeline input.
        _A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case_ ( self : Dict ) -> List[str]:
        # Round-trip save/load with both slow and fast tokenizers preserves
        # vocab and image-processor configuration.
        _A = self.get_tokenizer()
        _A = self.get_rust_tokenizer()
        _A = self.get_image_processor()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        _A = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : List[Any] ) -> List[str]:
        # from_pretrained kwargs (special tokens, normalization) are honored.
        _A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        _A = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> List[Any]:
        # Image path of the processor matches the bare image processor output.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = self.prepare_image_inputs()
        _A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
        _A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        # Text path of the processor matches the bare tokenizer output.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = processor(text=__lowerCAmelCase )
        _A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case_ ( self : List[str] ) -> Any:
        # Joint text+image call yields all expected keys; no input raises.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()

    def snake_case_ ( self : Optional[Any] ) -> str:
        # batch_decode delegates to the tokenizer.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _A = processor.batch_decode(__lowerCAmelCase )
        _A = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str ) -> str:
        # model_input_names is consistent with a joint call's keys.
        _A = self.get_image_processor()
        _A = self.get_tokenizer()
        _A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        _A = '''lower newer'''
        _A = self.prepare_image_inputs()
        _A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 2 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
    """Fast pipeline tests for DeepFloyd IFPipeline via the shared
    PipelineTesterMixin / IFPipelineTesterMixin hooks (skipped on MPS)."""

    # Pipeline class under test and the standard parameter sets it accepts.
    a__ : List[str] = IFPipeline
    a__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    a__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
    a__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}

    def snake_case_ ( self : Dict ) -> Optional[Any]:
        # Components come from the IFPipelineTesterMixin dummy factory.
        return self._get_dummy_components()

    def snake_case_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any]=0 ) -> List[str]:
        # Deterministic inputs: mps needs a default-device generator.
        if str(__lowerCAmelCase ).startswith('''mps''' ):
            _A = torch.manual_seed(__lowerCAmelCase )
        else:
            _A = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        _A = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def snake_case_ ( self : str ) -> Tuple:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def snake_case_ ( self : Union[str, Any] ) -> Any:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
        self._test_save_load_local()

    def snake_case_ ( self : Dict ) -> List[Any]:
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def snake_case_ ( self : Dict ) -> List[Any]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    """GPU integration tests running the full DeepFloyd IF stack:
    text-to-image, img2img and inpainting, each chained into its stage-II
    super-resolution pipeline, with VRAM ceilings and reference-image checks.

    NOTE(review): locals are name-mangled to ``_A`` and call arguments to
    ``__lowerCAmelCase`` (undefined as written); repeated statements such as
    the doubled ``pipe_a.enable_model_cpu_offload()`` were presumably
    ``pipe_1`` / ``pipe_2`` pairs originally. Confirm against the unmangled
    diffusers source.
    """

    def snake_case_ ( self : List[Any] ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ ( self : Tuple ) -> str:
        # Stage I + stage II text-to-image pipelines, fp16 variants.
        _A = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
        _A = IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to('''cuda''' )
        _A , _A = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        _A = None
        _A = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        _A = IFImgaImgPipeline(**pipe_a.components )
        _A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        _A = IFInpaintingPipeline(**pipe_a.components )
        _A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Tuple:
        # Text-to-image: stage I then stage II, checking shapes, peak VRAM
        # and mean pixel distance against stored reference renders.
        # pipeline 1
        _start_torch_memory_measurement()
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (64, 64, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> Dict:
        # Img2img: stage I then stage II (which also takes the original image).
        # pipeline 1
        _start_torch_memory_measurement()
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (64, 64, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , original_image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )

    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> Tuple:
        # Inpainting: stage I then stage II (image + mask, plus original image
        # for the super-resolution stage).
        # pipeline 1
        _start_torch_memory_measurement()
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__lowerCAmelCase )
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (64, 64, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
        # pipeline 2
        _start_torch_memory_measurement()
        _A = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
        _A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__lowerCAmelCase )
        _A = pipe_a(
            prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , original_image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
        _A = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _A = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _A = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
        assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
    """Reset CUDA memory bookkeeping before a measured pipeline run.

    Frees cached allocator blocks and zeroes both the max-allocated and
    peak-memory statistics so subsequent ``torch.cuda.max_memory_allocated()``
    reads reflect only the run under measurement.
    """
    for _reset in (
        torch.cuda.empty_cache,
        torch.cuda.reset_max_memory_allocated,
        torch.cuda.reset_peak_memory_stats,
    ):
        _reset()
| 2 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; NOTE(review): both assignments target the mangled name
# ``UpperCAmelCase_`` -- presumably ``logger`` and the pretrained-config
# archive map in the unmangled source.
UpperCAmelCase_ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config.json URL.
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( _A):
    """Configuration class for the original OpenAI GPT model.

    NOTE(review): this block is name-mangled. The ``_A`` base class is
    presumably ``PretrainedConfig``; the duplicated ``__lowerCAmelCase``
    parameter names in ``__init__`` are a SyntaxError as written, and the
    ``_A = ...`` body lines were presumably ``self.<attr> = <param>``
    assignments. Confirm against the unmangled transformers source.
    """

    # Model-type tag consumed by the auto-config machinery.
    a__ : int = "openai-gpt"
    # Map generic config attribute names onto GPT-specific field names.
    a__ : Dict = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]:
        # Presumed original parameters, in order: vocab_size, n_positions,
        # n_embd, n_layer, n_head, afn, resid_pdrop, embd_pdrop, attn_pdrop,
        # layer_norm_epsilon, initializer_range, summary_type,
        # summary_use_proj, summary_activation, summary_proj_to_labels,
        # summary_first_dropout -- TODO confirm.
        _A = vocab_size
        _A = n_positions
        _A = n_embd
        _A = n_layer
        _A = n_head
        _A = afn
        _A = resid_pdrop
        _A = embd_pdrop
        _A = attn_pdrop
        _A = layer_norm_epsilon
        _A = initializer_range
        _A = summary_type
        _A = summary_use_proj
        _A = summary_activation
        _A = summary_first_dropout
        _A = summary_proj_to_labels
        # Forward any remaining kwargs to the base configuration class.
        super().__init__(**__lowerCAmelCase )
| 2 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( _A):
    """Tests for ``DPMSolverSDEScheduler``: full denoising loops on dummy
    models with device-dependent reference sums/means of the final sample.

    NOTE(review): the ``_A`` base class is presumably ``SchedulerCommonTest``;
    locals are mangled to ``_A`` and several references (``config``,
    ``scheduler``, ``sample``, ``model`` ...) are undefined as written.
    Confirm against the unmangled diffusers source.
    """

    # Scheduler classes under test and the number of inference steps used.
    a__ : Union[str, Any] = (DPMSolverSDEScheduler,)
    a__ : Any = 10

    def snake_case_ ( self : List[Any] , **__lowerCAmelCase : List[Any] ) -> str:
        # Default scheduler config; overrides come in via kwargs.
        _A = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**__lowerCAmelCase )
        return config

    def snake_case_ ( self : Tuple ) -> str:
        # Sweep training-timestep counts.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase )

    def snake_case_ ( self : Dict ) -> Any:
        # Sweep matched (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )

    def snake_case_ ( self : Any ) -> int:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCAmelCase )

    def snake_case_ ( self : Any ) -> Optional[Any]:
        # Full loop, default (epsilon) prediction; compare against
        # per-device reference statistics of the final sample.
        _A = self.scheduler_classes[0]
        _A = self.get_scheduler_config()
        _A = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        _A = self.dummy_model()
        _A = self.dummy_sample_deter * scheduler.init_noise_sigma
        _A = sample.to(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _A = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _A = model(__lowerCAmelCase , __lowerCAmelCase )
            _A = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _A = output.prev_sample
        _A = torch.sum(torch.abs(__lowerCAmelCase ) )
        _A = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3

    def snake_case_ ( self : Dict ) -> str:
        # Same loop with v-prediction parameterization.
        _A = self.scheduler_classes[0]
        _A = self.get_scheduler_config(prediction_type='''v_prediction''' )
        _A = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        _A = self.dummy_model()
        _A = self.dummy_sample_deter * scheduler.init_noise_sigma
        _A = sample.to(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _A = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _A = model(__lowerCAmelCase , __lowerCAmelCase )
            _A = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _A = output.prev_sample
        _A = torch.sum(torch.abs(__lowerCAmelCase ) )
        _A = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3

    def snake_case_ ( self : int ) -> Any:
        # Full loop with timesteps placed directly on the target device.
        _A = self.scheduler_classes[0]
        _A = self.get_scheduler_config()
        _A = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase )
        _A = self.dummy_model()
        _A = self.dummy_sample_deter.to(__lowerCAmelCase ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            _A = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _A = model(__lowerCAmelCase , __lowerCAmelCase )
            _A = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _A = output.prev_sample
        _A = torch.sum(torch.abs(__lowerCAmelCase ) )
        _A = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3

    def snake_case_ ( self : Tuple ) -> Optional[Any]:
        # Full loop with the Karras sigma schedule enabled.
        _A = self.scheduler_classes[0]
        _A = self.get_scheduler_config()
        _A = scheduler_class(**__lowerCAmelCase , use_karras_sigmas=__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase )
        _A = self.dummy_model()
        _A = self.dummy_sample_deter.to(__lowerCAmelCase ) * scheduler.init_noise_sigma
        _A = sample.to(__lowerCAmelCase )
        for t in scheduler.timesteps:
            _A = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _A = model(__lowerCAmelCase , __lowerCAmelCase )
            _A = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _A = output.prev_sample
        _A = torch.sum(torch.abs(__lowerCAmelCase ) )
        _A = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 2 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
    """Fixture/helper class for the DeformableDETR image-processor tests:
    holds the processor configuration and computes the expected resized
    height/width for given inputs.

    NOTE(review): the duplicated ``__lowerCAmelCase`` parameter names in
    ``__init__`` are a mangling artifact (SyntaxError as written), and the
    ``_A = ...`` lines were presumably ``self.<attr> = ...`` assignments.
    Confirm against the unmangled transformers source.
    """

    def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Dict=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=1 / 2_55 , __lowerCAmelCase : int=True , ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        _A = parent
        _A = batch_size
        _A = num_channels
        _A = min_resolution
        _A = max_resolution
        _A = do_resize
        _A = size
        _A = do_normalize
        _A = image_mean
        _A = image_std
        _A = do_rescale
        _A = rescale_factor
        _A = do_pad

    def snake_case_ ( self : Optional[int] ) -> Any:
        # Config dict fed to the image-processor constructor in the tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False ) -> Dict:
        """Compute the (height, width) the processor should produce.

        Mirrors shortest-edge resizing: the smaller side is scaled to
        ``size["shortest_edge"]`` and the other side keeps the aspect ratio;
        in batched mode the per-image maxima are returned (padding shape).
        """
        if not batched:
            _A = image_inputs[0]
            if isinstance(__lowerCAmelCase , Image.Image ):
                _A , _A = image.size
            else:
                # Tensors/arrays are channels-first: shape is (C, H, W).
                _A , _A = image.shape[1], image.shape[2]
            if w < h:
                _A = int(self.size['''shortest_edge'''] * h / w )
                _A = self.size['''shortest_edge''']
            elif w > h:
                _A = self.size['''shortest_edge''']
                _A = int(self.size['''shortest_edge'''] * w / h )
            else:
                _A = self.size['''shortest_edge''']
                _A = self.size['''shortest_edge''']
        else:
            _A = []
            for image in image_inputs:
                _A , _A = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched outputs are padded to the per-axis maxima.
            _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[0] )[0]
            _A = max(__lowerCAmelCase , key=lambda __lowerCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
    """Tests for ``DeformableDetrImageProcessor``: property presence,
    dict round-tripping, PIL/numpy/torch call paths, and slow integration
    tests against COCO detection and panoptic annotations.

    NOTE(review): the ``_A`` base class is presumably
    ``ImageProcessingSavingTestMixin``; locals are mangled to ``_A`` and many
    ``__lowerCAmelCase`` references are undefined as written. Confirm against
    the unmangled transformers source.
    """

    # Class under test (None when vision deps are unavailable).
    a__ : Any = DeformableDetrImageProcessor if is_vision_available() else None

    def snake_case_ ( self : Optional[int] ) -> Any:
        _A = DeformableDetrImageProcessingTester(self )

    @property
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Optional[int] ) -> List[str]:
        # The processor must expose all documented configuration attributes.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_rescale''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
        self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )

    def snake_case_ ( self : List[str] ) -> int:
        # from_dict honors defaults and explicit size/max_size overrides.
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )
        _A = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCAmelCase )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , __lowerCAmelCase )

    def snake_case_ ( self : Any ) -> Union[str, Any]:
        pass

    def snake_case_ ( self : List[str] ) -> Optional[int]:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Tuple ) -> int:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Optional[Any] ) -> int:
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        _A , _A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        # COCO detection annotations round-trip: pixel values, areas, boxes,
        # ids, crowd flags, labels and sizes against stored references.
        # prepare image and target
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        _A = DeformableDetrImageProcessor()
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )

    @slow
    def snake_case_ ( self : List[str] ) -> List[str]:
        # COCO panoptic annotations round-trip, additionally checking masks.
        # prepare image, target and masks_path
        _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            _A = json.loads(f.read() )
        _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        _A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
        _A = image_processing(images=__lowerCAmelCase , annotations=__lowerCAmelCase , masks_path=__lowerCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        _A = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCAmelCase , atol=1E-4 ) )
        # verify area
        _A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCAmelCase ) )
        # verify boxes
        _A = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCAmelCase )
        _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        _A = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCAmelCase ) )
        # verify is_crowd
        _A = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCAmelCase ) )
        # verify class_labels
        _A = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCAmelCase ) )
        # verify masks
        _A = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCAmelCase )
        # verify orig_size
        _A = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCAmelCase ) )
        # verify size
        _A = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCAmelCase ) )
| 2 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Mark applied to tests in this module that hit the Hub (mangled name;
# presumably ``pytestmark`` or a reusable ``@pytest.mark.integration`` alias).
UpperCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> Tuple:
    """Check ``inspect_dataset`` copies the dataset script to the target dir.

    NOTE(review): the duplicated ``_snake_case`` parameter names are a
    mangling artifact (SyntaxError as written); presumably ``(path, tmp_path)``
    originally, and ``path``/``script_name`` below are undefined as written.
    """
    inspect_dataset(_snake_case , _snake_case )
    _A = path + '''.py'''
    assert script_name in os.listdir(_snake_case )
    assert "__pycache__" not in os.listdir(_snake_case )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[Any] , _snake_case :Union[str, Any] ) -> Any:
    """Check ``inspect_metric`` copies the metric script to the target dir.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written); ``path``/``script_name`` below are
    undefined as written.
    """
    inspect_metric(_snake_case , _snake_case )
    _A = path + '''.py'''
    assert script_name in os.listdir(_snake_case )
    assert "__pycache__" not in os.listdir(_snake_case )
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :List[str] , _snake_case :str ) -> Union[str, Any]:
    """``get_dataset_config_info`` returns the requested config and its splits.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written); ``info``/``config_name``/
    ``expected_splits`` below are undefined as written.
    """
    _A = get_dataset_config_info(_snake_case , config_name=_snake_case )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :str , _snake_case :List[str] ) -> Any:
    """``get_dataset_config_info`` raises when no config name is given for a
    multi-config dataset.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written).
    """
    with pytest.raises(_snake_case ):
        get_dataset_config_info(_snake_case , config_name=_snake_case )
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :int ) -> List[str]:
    """``get_dataset_config_names`` includes the expected config name.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written); ``expected``/``config_names`` below
    are undefined as written.
    """
    _A = get_dataset_config_names(_snake_case )
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Union[str, Any] , _snake_case :Any , _snake_case :Any ) -> Optional[Any]:
    """``get_dataset_infos`` lists all configs; the first config carries the
    expected split names.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written); several locals below are undefined as
    written.
    """
    _A = get_dataset_infos(_snake_case )
    assert list(infos.keys() ) == expected_configs
    _A = expected_configs[0]
    assert expected_config in infos
    _A = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :Optional[Any] , _snake_case :Any ) -> str:
    """``get_dataset_infos`` exposes the expected config with its splits.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written); several locals below are undefined as
    written.
    """
    _A = get_dataset_infos(_snake_case )
    assert expected_config in infos
    _A = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :Tuple , _snake_case :Dict ) -> List[str]:
    """``get_dataset_split_names`` raises when no config name is given for a
    multi-config dataset.

    NOTE(review): duplicated ``_snake_case`` parameter names are a mangling
    artifact (SyntaxError as written).
    """
    with pytest.raises(_snake_case ):
        get_dataset_split_names(_snake_case , config_name=_snake_case )
| 2 |
UpperCAmelCase_ = 0 # The first color of the flag.
UpperCAmelCase_ = 1 # The second color of the flag.
UpperCAmelCase_ = 2 # The third color of the flag.
UpperCAmelCase_ = (red, white, blue)
def SCREAMING_SNAKE_CASE_ ( _snake_case :list , colors :tuple = (0, 1, 2) ) -> list:
    """Sort a sequence containing only three distinct values in place (Dutch national flag).

    Args:
        _snake_case: the list to sort in place.
        colors: the three allowed values in their target order (generalized
            from the module-level constant; the default matches red/white/blue).

    Returns:
        The same list, with all ``colors[0]`` first, then ``colors[1]``, then ``colors[2]``.

    Raises:
        ValueError: if an element is not one of the three colors.

    Fixes: the body referenced an unbound ``sequence``/``colors`` and the two
    three-way swaps were destroyed (``_A , _A = ...`` assigned both values to
    one throwaway name instead of swapping the list entries).
    """
    sequence = _snake_case
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # move the low color behind the `low` boundary
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # move the high color past the `high` boundary
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contains only {colors} values'''
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixes: the obfuscation bound both inputs to `UpperCAmelCase_` while the
    # later lines referenced `user_input` / `unsorted`, and the print called a
    # `dutch_national_flag_sort` name that does not exist in this file — the
    # sort above is bound to `SCREAMING_SNAKE_CASE_`.
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(f'{SCREAMING_SNAKE_CASE_(unsorted)}')
| 2 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase_ = get_logger(__name__)
UpperCAmelCase_ = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class lowerCamelCase__ :
    """Abstract base class for all logits processors applied during generation.

    Fixes: the decorator argument was an unbound ``__lowerCAmelCase`` — the
    shared docstring constant defined above (bound to ``UpperCAmelCase_``) is
    the value it decorates with — and the two positional parameters shared one
    name, which is a SyntaxError.
    """

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : List[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray ) -> jnp.ndarray:
        # Subclasses must implement the actual score transformation.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowerCamelCase__ :
    """Abstract base class for all logits warpers applied during multinomial sampling.

    Fixes: unbound ``__lowerCAmelCase`` decorator argument (the shared
    docstring constant ``UpperCAmelCase_`` is what it decorates with) and
    duplicate positional parameter names (SyntaxError).
    """

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : List[str] , input_ids : jnp.ndarray , scores : jnp.ndarray ) -> jnp.ndarray:
        # Subclasses must implement the actual score transformation.
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowerCamelCase__ ( list):
    """A plain ``list`` of logits processors, applied to the scores in order.

    Fixes: the base class was an unbound ``_A`` (the processor-list pattern
    subclasses ``list`` — that is what ``for processor in self`` iterates),
    the four ``__call__`` parameters shared one name (SyntaxError), and the
    ``_A`` locals were referenced by their original names.
    """

    @add_start_docstrings(UpperCAmelCase_ )
    def __call__( self : Optional[int] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int , **kwargs : Tuple ) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Processors that take extra arguments must receive all of them via kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        f'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class lowerCamelCase__ ( _A):
    """Logits warper that divides the scores by ``temperature`` (sharper or flatter distribution).

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the warper base class defined above; confirm against the original module.
    Fixes: ``__init__`` validated ``isinstance`` against an unbound name and
    never assigned ``self.temperature``, which ``__call__`` reads.
    """

    def __init__( self : Union[str, Any] , temperature : float ) -> None:
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature

    def __call__( self : Optional[int] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class lowerCamelCase__ ( _A):
    """Top-p (nucleus) logits warper: keep the smallest token set whose cumulative probability >= top_p.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the warper base class defined above; confirm against the original module.
    Fixes: instance attributes were never assigned and every local was
    collapsed to ``_A`` while later lines read the original names.
    """

    def __init__( self : str , top_p : float , filter_value : float = -float('''Inf''' ) , min_tokens_to_keep : int = 1 ) -> None:
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        # Sort all scores descending so the cumulative probability mask can be built.
        topk_scores, topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        # Undo the sort so scores line up with their original vocabulary positions.
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class lowerCamelCase__ ( _A):
    """Top-k logits warper: keep only the k highest-scoring tokens, filter the rest.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the warper base class defined above; confirm against the original module.
    Fixes: attributes never assigned; locals collapsed to ``_A`` while later
    lines read the original names.
    """

    def __init__( self : Optional[Any] , top_k : int , filter_value : float = -float('''Inf''' ) , min_tokens_to_keep : int = 1 ) -> None:
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        # Never keep fewer than min_tokens_to_keep tokens.
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__( self : Union[str, Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores, topk_indices = lax.top_k(scores , topk )
        # Offset each row's indices into the flattened (batch * vocab) buffer.
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class lowerCamelCase__ ( _A):
    """Forces the specified token to be the first generated token (cur_len == 1).

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: ``self.bos_token_id`` was never assigned; locals were collapsed to
    ``_A`` while later lines read the original names.
    """

    def __init__( self : str , bos_token_id : int ) -> None:
        self.bos_token_id = bos_token_id

    def __call__( self : Union[str, Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float('''inf''' ) )
        # 1 exactly when cur_len == 1, i.e. only the first generation step is affected.
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class lowerCamelCase__ ( _A):
    """Forces the EOS token as the last generated token when max_length is reached.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: attributes never assigned; ``_A`` locals read under original names.
    """

    def __init__( self : int , max_length : int , eos_token_id : int ) -> None:
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__( self : List[str] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float('''inf''' ) )
        # 1 exactly when cur_len == max_length - 1, i.e. the final step.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class lowerCamelCase__ ( _A):
    """Suppresses EOS until at least ``min_length`` tokens have been generated.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: validation compared against unbound names; attributes never assigned.
    """

    def __init__( self : Tuple , min_length : int , eos_token_id : int ) -> None:
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__( self : Dict , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , scores )
        return scores
class lowerCamelCase__ ( _A):
    """Suppresses a list of tokens as soon as generation reaches ``begin_index``.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: attributes never assigned; ``_A`` locals read under original names.
    """

    def __init__( self : Optional[Any] , begin_suppress_tokens : Any , begin_index : Optional[int] ) -> None:
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__( self : List[str] , input_ids : Dict , scores : int , cur_len : int ) -> Optional[Any]:
        # 1 exactly when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , scores )
        return scores
class lowerCamelCase__ ( _A):
    """Unconditionally suppresses a fixed list of token ids at every step.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: ``self.suppress_tokens`` was never assigned.
    """

    def __init__( self : int , suppress_tokens : list ) -> None:
        self.suppress_tokens = list(suppress_tokens )

    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
        return scores
class lowerCamelCase__ ( _A):
    """Forces specific tokens at specific generation indices (e.g. Whisper task tokens).

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: ``jnp.intaa`` (a mangled ``jnp.int32``) does not exist; locals and
    the ``force_token_array`` attribute were never bound to the names read later.
    """

    def __init__( self : int , force_token_map : Optional[Any] ) -> None:
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__( self : Optional[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ) -> jnp.ndarray:
        def _force_token(generation_idx : int ):
            # Build a score matrix that is -inf everywhere except the forced token column.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float('''inf''' )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class lowerCamelCase__ ( _A):
    """Whisper timestamp rules: timestamps come in pairs, only increase, and win when
    their total probability exceeds any single text token.

    NOTE(review): the base class ``_A`` is unbound in this file — presumably
    the processor base class defined above; confirm against the original module.
    Fixes: duplicate parameter names (SyntaxError) and every ``self.*`` /
    local binding collapsed to ``_A`` while later lines read the original names.
    """

    def __init__( self : List[Any] , generate_config : str , model_config : Optional[Any] , decoder_input_length : Optional[Any] ) -> None:
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # First timestamp token sits immediately after the <|notimestamps|> token.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , '''max_initial_timestamp_index''' ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__( self : List[str] , input_ids : Any , scores : Any , cur_len : List[Any] ) -> Any:
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )

        def handle_pairs(input_ids_k : Any , scores_k : Any ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , scores_k , )

        scores = jax.vmap(handle_pairs )(input_ids , scores )

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )

        def handle_cumulative_probs(logprobs_k : Any , scores_k : Any ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , scores_k , )

        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
| 2 |
import itertools
import math
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> bool:
    """Deterministic primality test via 6k +/- 1 trial division."""
    if 1 < _snake_case < 4:
        # 2 and 3 are prime
        return True
    if _snake_case < 2 or _snake_case % 2 == 0 or _snake_case % 3 == 0:
        # negatives, 0, 1, all even numbers and multiples of 3 are not prime
        return False
    # every prime above 3 has the form 6k - 1 or 6k + 1
    candidate = 5
    limit = int(math.sqrt(_snake_case ) + 1 )
    while candidate < limit:
        if _snake_case % candidate == 0 or _snake_case % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def SCREAMING_SNAKE_CASE_ ( ):
    """Yield the prime numbers in increasing order, starting from 2.

    Fixes: the counter was bound to a throwaway ``_A`` while ``num`` was read,
    the primality test was called on an unbound ``_snake_case``, and the
    ``-> Dict`` annotation referenced an unimported name. The primality check
    is inlined because the sibling test in this file is bound to an obfuscated
    name that is later rebound.
    """

    def _is_prime(number : int ) -> bool:
        # 6k +/- 1 trial division, mirroring the module's primality test.
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num ):
            yield num
        num += 1
def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 10_001 ) -> int:
    """Return the ``_snake_case``-th prime number (Project Euler problem 7).

    Fixes: the body referenced an unbound ``nth`` and an undefined
    ``prime_generator``; the generator is inlined as private helpers so the
    function is self-contained regardless of the obfuscated sibling bindings.
    """

    def _is_prime(number : int ) -> bool:
        # 6k +/- 1 trial division.
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    def _prime_generator():
        num = 2
        while True:
            if _is_prime(num ):
                yield num
            num += 1

    return next(itertools.islice(_prime_generator() , _snake_case - 1 , _snake_case ) )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file — the
    # nth-prime function above is bound to the obfuscated `SCREAMING_SNAKE_CASE_`,
    # so this guard raises NameError as written. Confirm the intended entry point.
    print(f'{solution() = }')
| 2 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fixes: the obfuscation rebound one name five times, leaving `logger`,
# `SPIECE_UNDERLINE`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — all referenced by the tokenizer
# class below — undefined.
logger = logging.get_logger(__name__)

# SentencePiece's meta symbol marking a word start.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1_0_2_4}

# Keep the last obfuscated binding for any external reference to it.
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class lowerCamelCase__ ( _A):
    """SentencePiece-based tokenizer with a reduced monolingual vocabulary (BartPho-style).

    NOTE(review): this class was mechanically obfuscated — the base class `_A` is
    unbound (presumably PreTrainedTokenizer), and many local bindings were renamed
    to `_A` while later lines still read the original names (`mask_token`,
    `sp_model_kwargs`, `cnt`, `line`, `state`, `out_string`, `out_vocab_file`, ...).
    The comments below describe the intended flow; confirm against the original module.
    """
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : Tuple = ["input_ids", "attention_mask"]

    # Load the SentencePiece model plus a reduced ("monolingual") vocab file that
    # maps a subset of pieces to contiguous ids, with special tokens first.
    def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        _A = vocab_file
        _A = monolingual_vocab_file
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _A = {}
        _A = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                _A = cnt
                cnt += 1
        with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                # First whitespace-separated field of each dict.txt line is the token.
                _A = line.strip().split()[0]
                _A = len(self.fairseq_tokens_to_ids )
        if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
            _A = len(self.fairseq_tokens_to_ids )
        # Reverse mapping: reduced id -> token.
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    # Pickling support: the SentencePiece processor is not picklable, so only
    # its serialized proto is stored.
    def __getstate__( self : Any ) -> List[Any]:
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        _A = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _A = {}
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    # Build model inputs with special tokens: <s> A </s> or <s> A </s></s> B </s>.
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # 1 for special tokens, 0 for sequence tokens.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]

    # Token type ids are all zeros for this model.
    def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        # Vocabulary size of the reduced vocab.
        return len(self.fairseq_ids_to_tokens )

    def snake_case_ ( self : Dict ) -> Optional[Any]:
        # token -> id mapping including added tokens.
        _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
        # Tokenize with SentencePiece, returning string pieces.
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )

    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
        # token -> reduced id, with unk fallback.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
        # reduced id -> token.
        return self.fairseq_ids_to_tokens[index]

    def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        # Join pieces and replace the SentencePiece underline with a space.
        _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string

    # Write the SentencePiece model and the monolingual dict to `save_directory`.
    def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__lowerCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 2 |
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
    """Return the normalized backend key for a `if not is_xxx_available()` line.

    Returns None when the line is not a backend-test line; otherwise the
    sorted backend names joined with "_and_".
    """
    if _re_test_backend.search(_snake_case ) is None:
        return None
    found = sorted(match[0] for match in _re_backend.findall(_snake_case ))
    return "_and_".join(found )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
    """Parse an `__init__.py` and return its `_import_structure` objects and TYPE_CHECKING objects.

    NOTE(review): local bindings in this function were mechanically renamed to
    `_A`, but later references still use the original names (`lines`, `line`,
    `objects`, `backend`, `imports`, ...); as written this raises NameError.
    The comments below describe the intended flow — confirm against the
    original `check_inits.py`.
    """
    with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        _A = f.readlines()
    _A = 0
    # Skip to the start of the `_import_structure` definition.
    while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(_snake_case ):
        return None
    # First grab the objects without a specific backend in _import_structure
    _A = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        _A = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_snake_case ):
            _A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
            _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        _A = _re_import_struct_key_value.search(_snake_case )
        if single_line_import_search is not None:
            _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
            objects.extend(_snake_case )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                _A = lines[line_index]
                if _re_import_struct_add_one.search(_snake_case ) is not None:
                    objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
                elif _re_import_struct_add_many.search(_snake_case ) is not None:
                    _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_between_brackets.search(_snake_case ) is not None:
                    _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_quote_object.search(_snake_case ) is not None:
                    objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    _A = []
    while (
        line_index < len(_snake_case )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        _A = lines[line_index]
        _A = _re_import.search(_snake_case )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(_snake_case ):
        # If the line is an if is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                _A = lines[line_index]
                _A = _re_import.search(_snake_case )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( import_dict_objects , type_hint_objects ):
    """Compare the `_import_structure` half of an init with its TYPE_CHECKING half.

    Args:
        import_dict_objects: mapping backend-key -> object names from `_import_structure`.
        type_hint_objects: mapping backend-key -> object names from the TYPE_CHECKING block.

    Returns:
        A list of human-readable error strings; empty when both halves agree.

    Fixes: both parameters shared the name `_snake_case` (SyntaxError), the
    original annotations referenced unimported typing names, and locals were
    collapsed to `_A` while the body read the original names.
    """

    def find_duplicates(seq ):
        # names registered more than once in the same list
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
def SCREAMING_SNAKE_CASE_ ( ) -> int:
    """Walk the package tree, parse every `__init__.py` and raise on inconsistencies.

    NOTE(review): as written this raises NameError — `_snake_case` is not a
    parameter of this function (the walk root is presumably the module-level
    path constant), `parse_init`/`analyze_results` are not defined under those
    names in this file, and the `_A` locals are read under their original names
    (`objects`, `errors`, `fname`, `failures`). Confirm against the original
    `check_inits.py`.
    """
    _A = []
    for root, _, files in os.walk(_snake_case ):
        if "__init__.py" in files:
            _A = os.path.join(_snake_case , '''__init__.py''' )
            _A = parse_init(_snake_case )
            if objects is not None:
                _A = analyze_results(*_snake_case )
                if len(_snake_case ) > 0:
                    _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(_snake_case ) )
    if len(_snake_case ) > 0:
        raise ValueError('''\n\n'''.join(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( base_path=None ):
    """Return the dotted names of all submodules under the package directory.

    Args:
        base_path: root package directory to scan (generalized; defaults to the
            module-level ``PATH_TO_TRANSFORMERS`` constant).

    Fixes: the original walked `os.walk(_snake_case)` although the function
    takes no such parameter, called `directories.remove(_snake_case)` instead
    of removing the current folder, and annotated the return with an
    unimported typing name.
    """
    if base_path is None:
        base_path = PATH_TO_TRANSFORMERS
    submodules = []
    for path, directories, files in os.walk(base_path ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(base_path ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(base_path ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
# Submodules excluded from the main-init registration check below.
# NOTE(review): the check references these as `IGNORE_SUBMODULES`, but the
# obfuscation bound the list to `UpperCAmelCase_` — confirm the intended name.
UpperCAmelCase_ = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    """Check that every submodule is registered in the main `__init__` import structure.

    NOTE(review): as written this raises NameError — `_snake_case` is not a
    parameter (the transformers path constant is presumably intended),
    `IGNORE_SUBMODULES` and `get_transformers_submodules` are not defined
    under those names in this file, and the `_A` locals are read under their
    original names. Confirm against the original `check_inits.py`.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    _A = direct_transformers_import(_snake_case )
    _A = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
        _A = f.read()
    import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) )
    _A = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_snake_case ) > 0:
        _A = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registed in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` / `check_submodules` are not defined under
    # these names in this file — the defs above are all bound to the obfuscated
    # `SCREAMING_SNAKE_CASE_` — so this guard raises NameError as written.
    check_all_inits()
    check_submodules()
| 2 | 1 |
from pathlib import Path
import fire
def SCREAMING_SNAKE_CASE_ ( src_dir :str , dest_dir :str , n :int ) -> None:
    """Write the first `n` (right-stripped) lines of every file in `src_dir` to `dest_dir`.

    Args:
        src_dir: directory whose files are minified.
        dest_dir: output directory (created if missing).
        n: number of lines to keep per file.

    Fixes: the original declared three parameters all named `_snake_case`
    (SyntaxError) while the body read `src_dir`/`dest_dir`/`n`, and it left
    both file handles unclosed.
    """
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        # keep only the first n lines, with trailing whitespace stripped
        with path.open() as src_file:
            new = [x.rstrip() for x in src_file.readlines()][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.write_text('''\n'''.join(new ) )
if __name__ == "__main__":
    # Expose the minify function as a CLI via python-fire.
    # Fixes: the original passed an undefined `minify` — the function above is
    # bound to the obfuscated `SCREAMING_SNAKE_CASE_`.
    fire.Fire(SCREAMING_SNAKE_CASE_)
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_A)
class lowerCamelCase__ ( _A):
"""simple docstring"""
    # Pipeline constructor: forwards all arguments to the base Pipeline, then
    # checks the vision backend is installed and the model is a vision-to-seq model.
    # NOTE(review): `*__lowerCAmelCase` and `**__lowerCAmelCase` share one name,
    # which is a SyntaxError — presumably *args/**kwargs in the original.
    def __init__( self : Optional[int] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> List[str]:
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        requires_backends(self , '''vision''' )
        # Pick the TF or PyTorch model mapping depending on the active framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case_ ( self : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None ) -> int:
_A = {}
_A = {}
if prompt is not None:
_A = prompt
if generate_kwargs is not None:
_A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
_A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : List[str] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=None ) -> int:
_A = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
_A = self.model.config.model_type
if model_type == "git":
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
_A = [self.tokenizer.cls_token_id] + input_ids
_A = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
_A = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_A = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_A = None
return model_inputs
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=None ) -> str:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
_A = None
if generate_kwargs is None:
_A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_A = model_inputs.pop(self.model.main_input_name )
_A = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
return model_outputs
def snake_case_ ( self : Dict , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = []
for output_ids in model_outputs:
_A = {
'''generated_text''': self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
| 2 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase__ ( _A):
    """String-valued enumeration of the three MGP-STR decoding strategies:
    character-level, BPE, and WordPiece.

    NOTE(review): all three members are assigned to the same name ``a__`` after
    an automated rename, so only the last ("wp") survives — confirm the
    intended member names (CHARACTER/BPE/WORDPIECE) against upstream.
    """

    # character-level head
    a__ : Optional[int] = "char"
    # byte-pair-encoding head
    a__ : str = "bpe"
    # WordPiece head
    a__ : Union[str, Any] = "wp"
UpperCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase__ ( _A):
    """Processor for MGP-STR scene-text recognition: wraps a ViT image
    processor plus three tokenizers (char, GPT-2 BPE, BERT WordPiece) and
    combines the three decoding heads into a single best prediction.

    NOTE(review): an automated rename broke parts of this class — some
    signatures declare duplicate ``__lowerCAmelCase`` parameters (SyntaxError)
    and bodies reference pre-rename names (``kwargs``, ``image_processor``,
    ``tokenizer``, ``char_preds``, ...). Confirm against the upstream source.
    """

    a__ : str = ["image_processor", "char_tokenizer"]
    a__ : Optional[int] = "ViTImageProcessor"
    a__ : Optional[Any] = "MgpstrTokenizer"

    def __init__( self : Any , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Any ) -> Optional[int]:
        """Accept (image_processor, tokenizer); honour the deprecated
        ``feature_extractor`` kwarg, and load the BPE (gpt2) and WordPiece
        (bert-base-uncased) tokenizers used by the auxiliary heads."""
        _A = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __lowerCAmelCase , )
            _A = kwargs.pop('''feature_extractor''' )
        _A = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        _A = tokenizer
        _A = AutoTokenizer.from_pretrained('''gpt2''' )
        _A = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )

    def __call__( self : List[Any] , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : str ) -> Optional[int]:
        """Process images and/or text; when both are given, attach the char
        tokenizer's ids to the image-processor output as labels."""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            _A = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if text is not None:
            _A = self.char_tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            _A = encodings['''input_ids''']
            return inputs

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple ) -> Union[str, Any]:
        """Decode the (char, bpe, wp) logit triple; for each sample keep the
        head with the highest confidence and return all per-head strings too."""
        _A , _A , _A = sequences
        _A = char_preds.size(0 )
        _A , _A = self._decode_helper(__lowerCAmelCase , '''char''' )
        _A , _A = self._decode_helper(__lowerCAmelCase , '''bpe''' )
        _A , _A = self._decode_helper(__lowerCAmelCase , '''wp''' )
        _A = []
        _A = []
        for i in range(__lowerCAmelCase ):
            # Pick the head with the best cumulative-probability score.
            _A = [char_scores[i], bpe_scores[i], wp_scores[i]]
            _A = [char_strs[i], bpe_strs[i], wp_strs[i]]
            _A = scores.index(max(__lowerCAmelCase ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        _A = {}
        _A = final_strs
        _A = final_scores
        _A = char_strs
        _A = bpe_strs
        _A = wp_strs
        return out

    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ) -> List[str]:
        """Greedy-decode one head's logits into strings plus a confidence per
        sample (product of per-step max probabilities up to the EOS token)."""
        if format == DecodeType.CHARACTER:
            _A = self.char_decode
            _A = 1
            _A = '''[s]'''
        elif format == DecodeType.BPE:
            _A = self.bpe_decode
            _A = 2
            _A = '''#'''
        elif format == DecodeType.WORDPIECE:
            _A = self.wp_decode
            _A = 1_02
            _A = '''[SEP]'''
        else:
            raise ValueError(f'''Format {format} is not supported.''' )
        _A , _A = [], []
        _A = pred_logits.size(0 )
        _A = pred_logits.size(1 )
        # Greedy argmax over the vocab; drop the leading (BOS) position.
        _A , _A = pred_logits.topk(1 , dim=-1 , largest=__lowerCAmelCase , sorted=__lowerCAmelCase )
        _A = preds_index.view(-1 , __lowerCAmelCase )[:, 1:]
        _A = decoder(__lowerCAmelCase )
        _A , _A = torch.nn.functional.softmax(__lowerCAmelCase , dim=2 ).max(dim=2 )
        _A = preds_max_prob[:, 1:]
        for index in range(__lowerCAmelCase ):
            # Truncate string and probabilities at the EOS marker.
            _A = preds_str[index].find(__lowerCAmelCase )
            _A = preds_str[index][:pred_eos]
            _A = preds_index[index].cpu().tolist()
            _A = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
            _A = preds_max_prob[index][: pred_eos_index + 1]
            _A = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__lowerCAmelCase )
            conf_scores.append(__lowerCAmelCase )
        return dec_strs, conf_scores

    def snake_case_ ( self : Dict , __lowerCAmelCase : Tuple ) -> Dict:
        """Char-head decode: batch-decode ids and strip spaces."""
        _A = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
        return decode_strs

    def snake_case_ ( self : Dict , __lowerCAmelCase : List[str] ) -> Union[str, Any]:
        """BPE-head decode via the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )

    def snake_case_ ( self : Any , __lowerCAmelCase : str ) -> List[Any]:
        """WordPiece-head decode: batch-decode ids and strip spaces."""
        _A = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
        return decode_strs
| 2 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_(symbol: str = "AAPL") -> str:
    """Scrape the current price of *symbol* from Yahoo Finance India.

    Args:
        symbol: ticker symbol to look up (defaults to Apple).

    Returns:
        The raw text of the price ``<span>`` on the quote page.
    """
    # Bug fix: the original body interpolated a module-global ``symbol`` instead
    # of its own parameter (named ``_snake_case``), silently ignoring the argument.
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text , '''html.parser''' )
    # Yahoo marks the price container with this CSS class; brittle by nature.
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
    # Bug fix: ``stock_price`` does not exist in this module — the scraper above
    # is named ``SCREAMING_SNAKE_CASE_``.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {SCREAMING_SNAKE_CASE_(symbol):>8}')
| 2 | 1 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCamelCase__ :
    """Helper that builds a tiny DistilBert config plus random inputs and runs
    shape checks for every TF DistilBert head (base, MLM, QA, sequence/token
    classification, multiple choice).

    NOTE(review): an automated rename broke this class — ``__init__`` assigns
    every hyperparameter to ``_A`` instead of ``self.*`` and references an
    undefined ``parent``; the ``create_and_check_*`` signatures declare
    duplicate ``__lowerCAmelCase`` parameters (SyntaxError); and all methods
    share the name ``snake_case_``. Confirm against the upstream tester.
    """

    def __init__( self : Any , __lowerCAmelCase : List[Any] , ) -> Tuple:
        """Store the parent test case and the small-model hyperparameters."""
        _A = parent
        _A = 13
        _A = 7
        _A = True
        _A = True
        _A = False
        _A = True
        _A = 99
        _A = 32
        _A = 2
        _A = 4
        _A = 37
        _A = '''gelu'''
        _A = 0.1
        _A = 0.1
        _A = 5_12
        _A = 16
        _A = 2
        _A = 0.02
        _A = 3
        _A = 4
        _A = None

    def snake_case_ ( self : str ) -> Optional[int]:
        """Build random input ids/masks/labels and a small DistilBertConfig."""
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case_ ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Any:
        """Check the base model's last_hidden_state shape for dict and list inputs."""
        _A = TFDistilBertModel(config=__lowerCAmelCase )
        _A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        _A = model(__lowerCAmelCase )
        _A = [input_ids, input_mask]
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ) -> Union[str, Any]:
        """Check the masked-LM head's logits shape (vocab-sized last dim)."""
        _A = TFDistilBertForMaskedLM(config=__lowerCAmelCase )
        _A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> Optional[Any]:
        """Check the QA head's start/end logits shapes."""
        _A = TFDistilBertForQuestionAnswering(config=__lowerCAmelCase )
        _A = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> Tuple:
        """Check the sequence-classification head's logits shape."""
        _A = self.num_labels
        _A = TFDistilBertForSequenceClassification(__lowerCAmelCase )
        _A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case_ ( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ) -> str:
        """Check the multiple-choice head: inputs are tiled per choice."""
        _A = self.num_choices
        _A = TFDistilBertForMultipleChoice(__lowerCAmelCase )
        _A = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _A = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case_ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ) -> Dict:
        """Check the token-classification head's logits shape."""
        _A = self.num_labels
        _A = TFDistilBertForTokenClassification(__lowerCAmelCase )
        _A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        _A = model(__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case_ ( self : Optional[int] ) -> Any:
        """Return (config, inputs_dict) for the common-test machinery."""
        _A = self.prepare_config_and_inputs()
        ((_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs
        _A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
    """TF DistilBert model test suite: wires the tester above into the shared
    TFModelTesterMixin/PipelineTesterMixin machinery and runs one shape test
    per head, plus a slow from_pretrained smoke test.

    NOTE(review): an automated rename broke this class — the class attributes
    all clobber the single name ``a__``, every method is named ``snake_case_``
    (later defs shadow earlier ones), and the setUp body references
    ``TFDistilBertModelTester`` / ``__lowerCAmelCase`` which are undefined
    here. Confirm against the upstream test module.
    """

    # all_model_classes equivalent: every TF DistilBert head under test.
    a__ : Optional[int] = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    # pipeline_model_mapping equivalent: pipeline task -> model class.
    a__ : Dict = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a__ : str = False
    a__ : Optional[Any] = False

    def snake_case_ ( self : int ) -> Optional[int]:
        """Create the model tester and the config tester (hidden dim 37)."""
        _A = TFDistilBertModelTester(self )
        _A = ConfigTester(self , config_class=__lowerCAmelCase , dim=37 )

    def snake_case_ ( self : Tuple ) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def snake_case_ ( self : Optional[Any] ) -> Tuple:
        """Shape-check the base model."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*__lowerCAmelCase )

    def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
        """Shape-check the masked-LM head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
        """Shape-check the question-answering head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCAmelCase )

    def snake_case_ ( self : Optional[Any] ) -> Tuple:
        """Shape-check the sequence-classification head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCAmelCase )

    def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
        """Shape-check the multiple-choice head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCAmelCase )

    def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
        """Shape-check the token-classification head."""
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCAmelCase )

    @slow
    def snake_case_ ( self : List[str] ) -> List[str]:
        """Smoke-test loading the first pretrained checkpoint from the archive list."""
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            _A = TFDistilBertModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test: run the pretrained distilbert-base-uncased TF
    model on a fixed 6-token input and compare the first 3x3 hidden-state
    slice against golden values (atol 1e-4).

    NOTE(review): the body references ``model``/``output``/``__lowerCAmelCase``
    which are undefined after the automated rename that replaced the original
    local-variable assignments with ``_A = ...``. Confirm against upstream.
    """

    @slow
    def snake_case_ ( self : Dict ) -> Union[str, Any]:
        """Golden-value check for the base model's hidden states."""
        _A = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        _A = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A = model(__lowerCAmelCase )[0]
        # Expected output shape: (batch=1, seq_len=6, hidden=768).
        _A = [1, 6, 7_68]
        self.assertEqual(output.shape , __lowerCAmelCase )
        _A = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
| 2 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_() -> None:
    """Regression check: Kruskal's algorithm on a fixed 9-node weighted graph
    must produce the known minimum spanning tree.

    Edges are ``[u, v, weight]`` triples; the comparison is order-insensitive
    because an MST edge list has no canonical ordering.
    """
    # Bug fix: the original called ``kruskal(_snake_case, _snake_case)`` and
    # asserted ``sorted(_snake_case) == sorted(_snake_case)`` — that name was
    # undefined inside this function; wire up the intended locals instead.
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
| 2 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    Args:
        graph: adjacency mapping, node -> list of neighbour nodes.
        start: node to begin the traversal from.

    Returns:
        The set of all nodes reachable from ``start`` (including ``start``).
    """
    # Bug fixes: the original declared both parameters as ``_snake_case`` (a
    # SyntaxError) and seeded ``explored = set(start)``, which pre-marks each
    # *character* of a multi-character node label as visited.
    explored: set = set()
    stack = [start]
    while stack:
        v = stack.pop()
        if v in explored:
            # A node can be pushed more than once before it is popped.
            continue
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Example graph used by the demo below (nodes are single-letter labels).
UpperCAmelCase_ = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: neither ``G`` nor ``depth_first_search`` exists in this module;
    # use the actual module-level names defined above.
    print(SCREAMING_SNAKE_CASE_(UpperCAmelCase_, """A"""))
| 2 |
def SCREAMING_SNAKE_CASE_(input_num: int) -> int:
    """Return the sum of the proper divisors of *input_num*.

    A proper divisor is a positive divisor strictly smaller than the number
    itself (1 is included, the number itself is not).

    Raises:
        ValueError: if ``input_num`` is not an ``int`` or is not positive.
    """
    # Bug fixes: the original tested ``isinstance(x, x)`` (always a TypeError at
    # runtime) and referenced ``input_num`` while its parameter was named
    # ``_snake_case`` (NameError); restore the intended identifiers.
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 2 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__ ( _A):
    """Speech-to-image pipeline: transcribes audio with Whisper, then runs the
    transcription through a Stable Diffusion text-to-image loop (CLIP text
    encoder -> UNet denoising -> VAE decode).

    NOTE(review): an automated rename broke this class — ``__init__`` and
    ``__call__`` declare many parameters all named ``__lowerCAmelCase``
    (a SyntaxError), and the ``__call__`` body references the pre-rename names
    (``prompt``, ``height``, ``width``, ``batch_size``, ``latents``, ...).
    Confirm against the upstream community pipeline before use.
    """

    def __init__( self : Optional[int] , __lowerCAmelCase : WhisperForConditionalGeneration , __lowerCAmelCase : WhisperProcessor , __lowerCAmelCase : AutoencoderKL , __lowerCAmelCase : CLIPTextModel , __lowerCAmelCase : CLIPTokenizer , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCAmelCase : StableDiffusionSafetyChecker , __lowerCAmelCase : CLIPImageProcessor , ) -> Dict:
        """Register all sub-models; warn loudly when the safety checker is disabled."""
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=__lowerCAmelCase , speech_processor=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , )

    def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[Union[str, int]] = "auto" ) -> List[str]:
        """Enable sliced attention; "auto" halves the attention head dim per slice."""
        if slice_size == "auto":
            _A = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__lowerCAmelCase )

    def snake_case_ ( self : int ) -> List[Any]:
        """Disable attention slicing (slice size None computes attention in one step)."""
        self.enable_attention_slicing(__lowerCAmelCase )

    @torch.no_grad()
    def __call__( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=1_60_00 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : Optional[int] , ) -> Optional[Any]:
        """Transcribe the input audio with Whisper and generate image(s) from the
        transcription via the standard Stable Diffusion sampling loop.

        Positional arguments mirror the usual SD pipeline: (audio,
        sampling_rate, height, width, num_inference_steps, guidance_scale,
        negative_prompt, num_images_per_prompt, eta, generator, latents,
        output_type, return_dict, callback, callback_steps).
        """
        # --- speech -> text: Whisper transcription becomes the SD prompt ---
        _A = self.speech_processor.feature_extractor(
            __lowerCAmelCase , return_tensors='''pt''' , sampling_rate=__lowerCAmelCase ).input_features.to(self.device )
        _A = self.speech_model.generate(__lowerCAmelCase , max_length=48_00_00 )
        _A = self.speech_processor.tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , normalize=__lowerCAmelCase )[
            0
        ]
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            _A = 1
        elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            _A = len(__lowerCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}''' )
        if height % 8 != 0 or width % 8 != 0:
            # The VAE downsamples by 8, so output dims must be multiples of 8.
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(__lowerCAmelCase )}.''' )
        # get prompt text embeddings
        _A = self.tokenizer(
            __lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        _A = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            _A = text_input_ids[:, : self.tokenizer.model_max_length]
        _A = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _A , _A , _A = text_embeddings.shape
        _A = text_embeddings.repeat(1 , __lowerCAmelCase , 1 )
        _A = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCAmelCase , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        _A = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            _A = 42
            if negative_prompt is None:
                _A = [''''''] * batch_size
            elif type(__lowerCAmelCase ) is not type(__lowerCAmelCase ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase )} !='''
                    f''' {type(__lowerCAmelCase )}.''' )
            elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                _A = [negative_prompt]
            elif batch_size != len(__lowerCAmelCase ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ''' the batch size of `prompt`.''' )
            else:
                _A = negative_prompt
            _A = text_input_ids.shape[-1]
            _A = self.tokenizer(
                __lowerCAmelCase , padding='''max_length''' , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' , )
            _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _A = uncond_embeddings.shape[1]
            _A = uncond_embeddings.repeat(1 , __lowerCAmelCase , 1 )
            _A = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _A = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        _A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        _A = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                _A = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device='''cpu''' , dtype=__lowerCAmelCase ).to(
                    self.device )
            else:
                _A = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            _A = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(__lowerCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        _A = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        _A = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _A = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _A = {}
        if accepts_eta:
            _A = eta
        for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
            # expand the latents if we are doing classifier free guidance
            _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _A = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            # predict the noise residual
            _A = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample
            # perform guidance
            if do_classifier_free_guidance:
                _A , _A = noise_pred.chunk(2 )
                _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Un-scale latents by the SD VAE scaling factor before decoding.
        _A = 1 / 0.1_8215 * latents
        _A = self.vae.decode(__lowerCAmelCase ).sample
        _A = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            _A = self.numpy_to_pil(__lowerCAmelCase )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase )
| 2 |
# Base of the polynomial rolling hash (size of the byte alphabet).
UpperCAmelCase_ = 2_5_6
# Modulus to hash a string
# NOTE(review): both constants were renamed to the same ``UpperCAmelCase_`` by
# an automated pass, so the first assignment is immediately overwritten and the
# matcher below refers to ``alphabet_size``/``modulus``, which are never
# defined — confirm the intended constant names.
UpperCAmelCase_ = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE_(pattern: str, text: str) -> bool:
    """Rabin–Karp substring search: return True iff *pattern* occurs in *text*.

    Uses a rolling polynomial hash (base ``alphabet_size``, modulo ``modulus``)
    so that, on average, only hash-matching windows are compared character by
    character; the explicit slice comparison guards against hash collisions.
    """
    # Bug fix: ``alphabet_size`` and ``modulus`` were undefined — the module
    # constants above were both renamed to ``UpperCAmelCase_`` — so bind the
    # intended values locally to keep this function self-contained.
    alphabet_size = 256
    modulus = 1_000_003
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Self-test of the Rabin–Karp matcher with positive and negative cases,
    including a non-ASCII pattern.

    NOTE(review): this test calls ``rabin_karp``, which does not exist in this
    module — the matcher above was renamed to ``SCREAMING_SNAKE_CASE_``, a name
    this very function now shadows. The ``__main__`` guard below likewise calls
    the undefined ``test_rabin_karp``. Both names need a coordinated fix.
    """
    _A = '''abc1abc12'''
    _A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    _A = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
    # Test 2)
    _A = '''ABABX'''
    _A = '''ABABZABABYABABX'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 3)
    _A = '''AAAB'''
    _A = '''ABAAAAAB'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 4)
    _A = '''abcdabcy'''
    _A = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(_snake_case , _snake_case )
    # Test 5)
    _A = '''Lü'''
    _A = '''Lüsai'''
    assert rabin_karp(_snake_case , _snake_case )
    _A = '''Lue'''
    assert not rabin_karp(_snake_case , _snake_case )
    print('''Success.''' )


if __name__ == "__main__":
    # NOTE(review): ``test_rabin_karp`` is undefined here (see note above).
    test_rabin_karp()
| 2 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# mark all tests as integration
# NOTE(review): all seven module constants below were renamed to the single
# name ``UpperCAmelCase_`` by an automated pass, so each assignment clobbers
# the previous one and the skip decorators further down reference the original
# names (``REQUIRE_FAIRSEQ``, ``_has_fairseq``, ``UNSUPPORTED_ON_WINDOWS``,
# ``_on_windows``, ``REQUIRE_TRANSFORMERS``, ``_has_transformers``), which are
# never defined — confirm the intended constant names.
UpperCAmelCase_ = pytest.mark.integration
UpperCAmelCase_ = {"""comet"""}
UpperCAmelCase_ = importlib.util.find_spec("""fairseq""") is not None
UpperCAmelCase_ = {"""code_eval"""}
UpperCAmelCase_ = os.name == """nt"""
UpperCAmelCase_ = {"""bertscore""", """frugalscore""", """perplexity"""}
UpperCAmelCase_ = importlib.util.find_spec("""transformers""") is not None
def SCREAMING_SNAKE_CASE_(test_case):
    """Decorator for LocalMetricTest methods: skip metrics that require
    fairseq when fairseq is not installed."""

    # Bug fix: the decorated function and the metric-name parameter were left
    # dangling by an automated rename (the body referenced undefined
    # ``test_case``/``metric_name``); restore the intended closure names.
    # NOTE(review): ``_has_fairseq``/``REQUIRE_FAIRSEQ`` are also not defined
    # at module level in this file after the rename — confirm.
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self, metric_name)

    return wrapper
def SCREAMING_SNAKE_CASE_(test_case):
    """Decorator for LocalMetricTest methods: skip metrics that require the
    ``transformers`` package when it is not installed."""

    # Bug fix: the body referenced undefined ``test_case``/``metric_name``
    # after an automated rename; restore the intended closure names.
    # NOTE(review): ``_has_transformers``/``REQUIRE_TRANSFORMERS`` are also not
    # defined at module level in this file after the rename — confirm.
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self, metric_name)

    return wrapper
def SCREAMING_SNAKE_CASE_(test_case):
    """Decorator for LocalMetricTest methods: skip metrics that are known not
    to work on Windows."""

    # Bug fix: the body referenced undefined ``test_case``/``metric_name``
    # after an automated rename; restore the intended closure names.
    # NOTE(review): ``_on_windows``/``UNSUPPORTED_ON_WINDOWS`` are also not
    # defined at module level in this file after the rename — confirm.
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self, metric_name)

    return wrapper
def SCREAMING_SNAKE_CASE_():
    """Collect the metric names under ``./metrics/*/`` for parameterized tests.

    Returns:
        A list of ``{"testcase_name": name, "metric_name": name}`` dicts,
        one per local metric directory, skipping the unfinished "gleu" metric.
    """
    # The comprehension result was bound to a mangled name while the return
    # statement read `metrics`; bind it under the name actually used.
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    _A , _A , _A)
@local
class lowerCamelCase__ ( parameterized.TestCase):
    """Parameterized doctest runner for every local metric under ./metrics.

    NOTE(review): names here look mangled.  The three `_A` decorator arguments
    are presumably the fairseq/transformers/windows skip wrappers defined
    above; the two `a__` class attributes are presumably
    ``INTENSIVE_CALLS_PATCHER = {}`` and ``metric_name = None``; the method
    names (all ``snake_case_``) shadow each other, yet the bodies call
    ``self.patch_intensive_calls`` / ``self.use_local_metrics`` /
    ``register_intensive_calls_patcher`` -- confirm against upstream.  The
    ``_A = ...`` statements likewise drop values later read back as
    ``metric_module``, ``metric``, ``parameters`` and ``results``.
    """
    a__ : List[Any] = {}
    a__ : Tuple = None
    # Import a metric module from ./metrics, check its _compute signature has
    # no **kwargs, then run its doctests with intensive calls patched out.
    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def snake_case_ ( self : Any , __lowerCAmelCase : Any ) -> List[Any]:
        _A = '''[...]'''
        _A = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCAmelCase ) ).module_path )
        _A = datasets.load.import_main_class(metric_module.__name__ , dataset=__lowerCAmelCase )
        # check parameters
        _A = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(__lowerCAmelCase , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    _A = doctest.testmod(__lowerCAmelCase , verbose=__lowerCAmelCase , raise_on_error=__lowerCAmelCase )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    # Slow variant: run the metric's doctests without patching intensive calls.
    @slow
    def snake_case_ ( self : int , __lowerCAmelCase : int ) -> Optional[Any]:
        _A = '''[...]'''
        _A = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCAmelCase ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            _A = doctest.testmod(__lowerCAmelCase , verbose=__lowerCAmelCase , raise_on_error=__lowerCAmelCase )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    # Context manager: apply the registered patcher for `metric_name`, if any.
    @contextmanager
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ) -> Optional[int]:
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](__lowerCAmelCase ):
                yield
        else:
            yield
    # Context manager: redirect datasets.load_metric to the local ./metrics dir.
    @contextmanager
    def snake_case_ ( self : Optional[int] ) -> List[Any]:
        def load_local_metric(__lowerCAmelCase : List[str] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ):
            return load_metric(os.path.join('''metrics''' , __lowerCAmelCase ) , *__lowerCAmelCase , **__lowerCAmelCase )
        with patch('''datasets.load_metric''' ) as mock_load_metric:
            _A = load_local_metric
            yield
    # Classmethod decorator factory: register a context-manager patcher for a
    # metric name (presumably into INTENSIVE_CALLS_PATCHER -- the `_A = ...`
    # lines here look like mangled `patcher = contextmanager(...)` and
    # `cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher`).
    @classmethod
    def snake_case_ ( cls : Dict , __lowerCAmelCase : Any ) -> List[str]:
        def wrapper(__lowerCAmelCase : Optional[int] ):
            _A = contextmanager(__lowerCAmelCase )
            _A = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Any:
    """Patcher for the `bleurt` metric: replace the TF predictor with a stub.

    NOTE(review): ``tensorflow.compat.va`` looks like a mangled
    ``tensorflow.compat.v1``; the nested class is presumably
    ``MockedPredictor(Predictor)`` (it is instantiated as ``MockedPredictor``
    below), and ``_A = MockedPredictor()`` is presumably
    ``mock_create_predictor.return_value = MockedPredictor()``.  Confirm
    against the upstream source.
    """
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
    class lowerCamelCase__ ( _A):
        """Stub predictor returning fixed scores for a batch of two inputs."""
        def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
            # NOTE(review): body reads `input_dict`, which the mangled
            # parameter name never binds.
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        _A = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> Any:
    """Patcher for the `bertscore` metric: stub out model download and scoring.

    NOTE(review): ``_A = bert_cos_score_idf`` is presumably
    ``mock_bert_cos_score_idf.side_effect = bert_cos_score_idf`` -- confirm
    against the upstream source.
    """
    import torch
    def bert_cos_score_idf(_snake_case :List[Any] , _snake_case :Optional[int] , *_snake_case :Union[str, Any] , **_snake_case :Optional[int] ):
        # Return one fixed (P, R, F) row per reference sentence.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(_snake_case ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        _A = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> Union[str, Any]:
    """Patcher for the `comet` metric: stub out model download and prediction.

    NOTE(review): the nested class is instantiated as ``Model()`` but is named
    ``lowerCamelCase__`` -- presumably a mangled ``class Model``; likewise the
    stub method reads ``scores`` which ``_A = [0.19, 0.92]`` never binds, and
    the ``_A = ...`` lines under ``patch`` are presumably the mocks'
    ``return_value`` / ``side_effect`` assignments.  Confirm upstream.
    """
    def load_from_checkpoint(_snake_case :Dict ):
        class lowerCamelCase__ :
            """Stub COMET model returning fixed segment scores."""
            def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Any , *__lowerCAmelCase : Any , **__lowerCAmelCase : str ) -> Tuple:
                assert len(__lowerCAmelCase ) == 2
                _A = [0.19, 0.92]
                return scores, sum(__lowerCAmelCase ) / len(__lowerCAmelCase )
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch('''comet.download_model''' ) as mock_download_model:
        _A = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            _A = load_from_checkpoint
            yield
def SCREAMING_SNAKE_CASE_():
    """Check that the local `seqeval` metric rejects an unknown tagging scheme.

    Loads ./metrics/seqeval and asserts that `compute` raises ValueError with
    the expected message when given an invalid `scheme` argument.
    """
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    # The original bound these to mangled throwaway names, so the f-string and
    # the pytest.raises arguments referenced undefined names -- restore them.
    wrong_scheme = 'ERROR'
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 2 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): the constants below are all bound to `UpperCAmelCase_`, but the
# tokenizer class later reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# BPE_TOKEN_MERGES and BPE_TOKEN_VOCAB (and `logger` above) -- the names look
# mangled; restore the distinct names so those lookups resolve.
# File names used when serializing the tokenizer to disk.
UpperCAmelCase_ = {
    """vocab_file""": """vocab.json""",
    """tokenizer_config_file""": """tokenizer_config.json""",
    """merges_file""": """merges.txt""",
}
# Hub URLs of the pretrained vocabulary/config/merges files.
UpperCAmelCase_ = {
    """vocab_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
        ),
    },
    """tokenizer_config_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
        ),
    },
    """merges_file""": {
        """facebook/s2t-wav2vec2-large-en-de""": (
            """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
        ),
    },
}
# End-of-word suffix used by the BPE vocabulary.
UpperCAmelCase_ = """</w>"""
# Continuation marker inserted between BPE sub-tokens.
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_(word):
    """Return the set of adjacent symbol pairs occurring in `word`.

    Args:
        word: a non-empty sequence of symbols (e.g. a string or a tuple of
            sub-token strings).

    Returns:
        A set of ``(previous_symbol, symbol)`` tuples, one per adjacent pair.
    """
    # The original bound both `pairs` and `prev_char` to a mangled throwaway
    # name, so the loop body referenced undefined names -- restore them.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# NOTE(review): presumably `MAX_MODEL_INPUT_SIZES` (positional-embedding size
# per checkpoint) -- the binding name looks mangled; confirm upstream.
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """Speech2Text2 BPE tokenizer (primarily used for decoding).

    NOTE(review): identifiers in this class look mangled.  The base class `_A`
    is presumably ``PreTrainedTokenizer``; the `a__` attributes are presumably
    the usual ``vocab_files_names`` / ``pretrained_vocab_files_map`` /
    ``max_model_input_sizes`` / ``model_input_names``; the method names (all
    ``snake_case_``) shadow one another although the bodies call ``self.bpe``;
    and every ``_A = ...`` statement drops a value that later lines read back
    under its real name (``self.do_lower_case``, ``self.encoder``,
    ``self.decoder``, ``self.bpe_ranks``, ``self.cache``, ``merges``, ...).
    Confirm against the upstream Speech2Text2Tokenizer before relying on it.
    """
    a__ : Dict = VOCAB_FILES_NAMES
    a__ : str = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ : List[Any] = ["input_ids", "attention_mask"]
    # Positional parameters are presumably (vocab_file, bos_token, pad_token,
    # eos_token, unk_token, do_lower_case, merges_file) -- the body reads
    # `do_lower_case` and `merges_file`, which are never bound here.
    def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
        super().__init__(
            unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
        _A = do_lower_case  # presumably `self.do_lower_case` -- TODO confirm
        with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            _A = json.load(__lowerCAmelCase )  # presumably `self.encoder`
        _A = {v: k for k, v in self.encoder.items()}  # presumably `self.decoder`
        if merges_file is None:
            # Without merges the tokenizer can only map ids back to tokens.
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            _A = None  # presumably `self.bpe_ranks = None`
            _A = None  # presumably `self.cache = None`
        else:
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
                _A = merges_handle.read().split('''\n''' )[:-1]  # presumably `merges`
            _A = [tuple(merge.split()[:2] ) for merge in merges]  # first two symbols per merge rule
            _A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )  # presumably `self.bpe_ranks`
            _A = {}  # presumably `self.cache` (token -> BPE result)
    # vocab_size: number of entries in the id -> token map.
    @property
    def snake_case_ ( self : List[str] ) -> int:
        return len(self.decoder )
    # Full vocabulary including any added tokens.
    def snake_case_ ( self : Dict ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    # Apply byte-pair encoding to a single token using self.bpe_ranks.
    # NOTE(review): the body reads `token`/`pairs`/`bigram`/`first`/`second`/
    # `new_word`/`word`/`i`/`j`, none of which the mangled assignments bind.
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
        _A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        _A = get_pairs(__lowerCAmelCase )
        if not pairs:
            return token
        while True:
            # Pick the lowest-ranked (most frequent) bigram still present.
            _A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            _A , _A = bigram
            _A = []
            _A = 0
            while i < len(__lowerCAmelCase ):
                try:
                    _A = word.index(__lowerCAmelCase , __lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    _A = j
                if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _A = tuple(__lowerCAmelCase )
            _A = new_word
            if len(__lowerCAmelCase ) == 1:
                break
            else:
                _A = get_pairs(__lowerCAmelCase )
        _A = ''' '''.join(__lowerCAmelCase )
        # Special-case a bare newline token and strip the end-of-word marker.
        if word == "\n " + BPE_TOKEN_MERGES:
            _A = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(__lowerCAmelCase ):
            _A = word.replace(__lowerCAmelCase , '''''' )
        _A = word.replace(''' ''' , __lowerCAmelCase )
        _A = word
        return word
    # Tokenize text: lower-case (optionally), split on whitespace, BPE each token.
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            _A = text.lower()
        _A = text.split()
        _A = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
        return split_tokens
    # token -> id, falling back to the unk token's id.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
        return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
    # id -> token, falling back to the unk token.
    def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
        _A = self.decoder.get(__lowerCAmelCase , self.unk_token )
        return result
    # Join tokens back into a string, fusing "@@ " continuation markers.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
        _A = ''' '''.join(__lowerCAmelCase )
        # make sure @@ tokens are concatenated
        _A = ''''''.join(string.split(__lowerCAmelCase ) )
        return string
    # Write vocab.json (and merges.txt when available) into `save_directory`.
    # NOTE(review): the sort key ``lambda __lowerCAmelCase : kv[1]`` reads an
    # undefined `kv` -- presumably a mangled ``lambda kv: kv[1]``.
    def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
        _A = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    _A = token_index
                writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 2 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCamelCase__ ( _A):
    """Iterable dataset yielding 0, 1, 2, ... that stops at random.

    Each step after the first yield stops with probability `p_stop`; iteration
    always ends after `max_length` items.  (The base class `_A` is presumably
    a mangled `IterableDataset` -- TODO confirm.)
    """

    def __init__(self, p_stop=0.01, max_length=1000):
        # The original bound both arguments to a throwaway name, so the
        # `self.p_stop` / `self.max_length` reads in __iter__ were never set.
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        # `count` / `stop` were likewise never bound in the original body.
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowerCamelCase__ ( unittest.TestCase):
    """Unit tests for accelerate's data-loader sharding utilities.

    Covers BatchSamplerShard (with/without split_batches and even_batches),
    IterableDatasetShard, SkipBatchSampler/SkipDataLoader/skip_first_batches,
    and the end_of_dataloader flag of DataLoaderShard/DataLoaderDispatcher.

    NOTE(review): local names look mangled throughout -- `_A = ...` drops
    values later read as `batch_sampler` / `expected` / `dataloader` etc., and
    `__lowerCAmelCase` is used as a bare expression where those names should
    appear.  The structure of each test (build a sampler, state the expected
    shards, compare) is still recognizable; confirm names against upstream.
    """
    # Build two BatchSamplerShards over `batch_sampler` and compare with the
    # per-process `expected` lists.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : str=True ) -> Optional[int]:
        _A = [
            BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
            for i in range(2 )
        ]
        _A = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    def snake_case_ ( self : Union[str, Any] ) -> Dict:
        # Check the shards when the dataset is a round multiple of total batch size.
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
    def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
        # Check the shards when the dataset is a round multiple of batch size.
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        # Check the shards when the dataset is a round multiple of total batch size.
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
    def snake_case_ ( self : str ) -> Dict:
        # Check the shards when the dataset is a round multiple of batch size.
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
    # Shards of an irregular batch sampler with even_batches disabled.
    def snake_case_ ( self : List[Any] ) -> Any:
        _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        _A = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    # Build IterableDatasetShards and check their concatenation matches the
    # reference stream (cycled when drop_last is False).
    def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Any=2 , __lowerCAmelCase : str=False ) -> Any:
        random.seed(__lowerCAmelCase )
        _A = list(__lowerCAmelCase )
        _A = [
            IterableDatasetShard(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
            for i in range(__lowerCAmelCase )
        ]
        _A = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(__lowerCAmelCase )
            iterable_dataset_lists.append(list(__lowerCAmelCase ) )
        _A = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        _A = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
        self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
        _A = []
        for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
                reference += reference
        self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
    def snake_case_ ( self : int ) -> str:
        _A = 42
        _A = RandomIterableDataset()
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Edge case with a very small dataset
        _A = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
    # SkipBatchSampler should drop the first `skip_batches` batches.
    def snake_case_ ( self : Dict ) -> str:
        _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _A = SkipBatchSampler(__lowerCAmelCase , 2 )
        self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # SkipDataLoader should drop the first `skip_batches` batches.
    def snake_case_ ( self : Dict ) -> List[str]:
        _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # skip_first_batches wraps an existing DataLoader.
    def snake_case_ ( self : Any ) -> List[Any]:
        _A = DataLoader(list(range(16 ) ) , batch_size=4 )
        _A = skip_first_batches(__lowerCAmelCase , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # end_of_dataloader must be set exactly on the last batch, on every epoch.
    def snake_case_ ( self : Dict ) -> Any:
        _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def snake_case_ ( self : List[Any] ) -> List[str]:
        Accelerator()
        _A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 2 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
# NOTE(review): presumably bound as `T` -- the classes below use Generic[T].
UpperCAmelCase_ = TypeVar("""T""")
def get_parent_position(position: int) -> int:
    """Return the heap index of the parent of the node at `position`.

    Restored name: the priority queue below calls `get_parent_position`, but
    the three index helpers were all mangled to the same name, shadowing each
    other.
    """
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    """Return the heap index of the left child of the node at `position`.

    Restored name: the priority queue below calls `get_child_left_position`.
    """
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    """Return the heap index of the right child of the node at `position`.

    Restored name: the priority queue below calls `get_child_right_position`.
    """
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Min-heap priority queue with position tracking for `update_key`.

    Restored names: the original class was mangled to `lowerCamelCase__`
    (shadowed by the graph class below) although Prim's algorithm
    instantiates `MinPriorityQueue()`; every method shared the name
    `snake_case_` although the bodies and callers use `push`, `extract_min`,
    `update_key`, `is_empty`, `_bubble_up`, `_bubble_down` and `_swap_nodes`;
    and the `self.heap` / `self.position_map` attributes were never assigned.
    """

    def __init__(self) -> None:
        self.heap = []  # list of (elem, weight) tuples, heap-ordered by weight
        self.position_map = {}  # elem -> index in self.heap
        self.elements = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem, weight) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight) -> None:
        # Update the weight of the given key and restore the heap property.
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            # Prefer the right child when it is the strictly smallest.
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodea_pos2: int) -> None:
        # Swap the nodes at the given positions and fix the position map.
        nodea_elem = self.heap[nodea_pos][0]
        nodea_elem2 = self.heap[nodea_pos2][0]
        self.heap[nodea_pos], self.heap[nodea_pos2] = (
            self.heap[nodea_pos2],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodea_pos2
        self.position_map[nodea_elem2] = nodea_pos
class lowerCamelCase__ ( Generic[T]):
    """An undirected graph with integer edge weights.

    ``connections`` maps each node to a ``{neighbour: weight}`` dict and
    ``nodes`` counts the distinct nodes added so far.

    NOTE(review): the ``_A = ...`` assignments were mangled ``self.<attr>``
    assignments (the methods below read ``self.connections``/``self.nodes``),
    and ``add_node``/``add_edge`` were both mangled to ``snake_case_``;
    restored here so ``self.add_node`` resolves.
    """

    def __init__(self) -> None:
        # node -> {neighbour: weight}
        self.connections: dict = {}
        # Number of distinct nodes added so far.
        self.nodes = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Add ``node`` to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given ``weight`` between two nodes."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def SCREAMING_SNAKE_CASE_(graph: "GraphUndirectedWeighted[T]") -> "tuple[dict[T, int], dict[T, T | None]]":
    """Prim-style minimum-spanning-tree construction over ``graph``.

    Returns ``(dist, parent)`` where ``parent[v]`` is the node ``v`` was
    attached through (``None`` for the start node / unreachable nodes).

    NOTE(review): the parameter was mangled to ``_snake_case`` while the body
    reads ``graph`` — restored.  The annotations are quoted because
    ``GraphUndirectedWeighted``/``T`` were also mangled away at module level.
    NOTE(review): the relaxation uses ``dist[node] + w`` (shortest-path style)
    rather than just ``w``; preserved exactly as in the original body.
    """
    # Every node starts "infinitely" far away with no parent.
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent

    # Initialization: pick an arbitrary start node and relax its neighbours.
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # Main loop: repeatedly take the closest queued node and relax around it.
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 2 | 1 |
def SCREAMING_SNAKE_CASE_(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as integers.

    Raises:
        ValueError: if ``bit_count`` is negative.

    NOTE(review): parameter restored from the mangled ``_snake_case`` (the
    body reads ``bit_count``/``sequence``), and the in-place int conversion
    lost to ``_A = int(...)`` is reinstated.
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as binary strings.

    Built recursively: prefix '0' to the (n-1)-bit sequence, then prefix '1'
    to that sequence traversed in reverse.

    NOTE(review): def name restored from the mangled ``SCREAMING_SNAKE_CASE_``
    so the recursive call below (and the caller above) resolve; locals
    restored from the body's reads.
    """
    # Base cases achieved when either n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    # 1 << n is equivalent to 2 ** n: the length of the sequence.
    seq_len = 1 << bit_count
    # Recursive answer generates the sequence for n - 1 bits.
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # Append 0 to the first half of the smaller sequence.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # Append 1 to the second half ... start from the end of the list.
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 2 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the sentencepiece tokenizer below.
# NOTE(review): these were all mangled to the single name ``UpperCAmelCase_``,
# so each assignment clobbered the previous one and the names the class below
# actually reads (``logger``, ``VOCAB_FILES_NAMES``, ...) were left undefined.
# Restored here with their values unchanged.
logger = logging.get_logger(__name__)

# Marker sentencepiece uses for a leading word boundary.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_0_2_4}
class lowerCamelCase__ ( _A):
    """SentencePiece tokenizer restricted to a fairseq-style monolingual vocab.

    Loads a sentencepiece BPE model plus a ``dict.txt`` word list and keeps a
    token<->id mapping limited to that monolingual vocabulary (BartPho layout,
    per the URLs in the constants above).

    NOTE(review): this block is machine-mangled.  Every ``_A = ...`` below was
    originally an assignment to a real name (mostly ``self.<attr> = ...``,
    judging by the attributes read later), every ``def snake_case_`` shadows
    the previous one of that name, and the duplicated ``__lowerCAmelCase``
    parameters are a SyntaxError as written.  Code left byte-identical here.
    """
    a__ : int = VOCAB_FILES_NAMES
    a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Names of the tensors this tokenizer produces for the model.
    a__ : Tuple = ["input_ids", "attention_mask"]
    # Original parameters (per the body's reads): vocab_file,
    # monolingual_vocab_file, bos/eos/sep/cls/unk/pad/mask tokens,
    # sp_model_kwargs, **kwargs — TODO restore from upstream.
    def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[Any]="<s>" , __lowerCAmelCase : Tuple="<unk>" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Tuple , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        _A = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        _A = vocab_file
        _A = monolingual_vocab_file
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__lowerCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _A = {}
        _A = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
                _A = cnt
                cnt += 1
        # Each line of dict.txt starts with a token; ids are assigned in order.
        with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                _A = line.strip().split()[0]
                _A = len(self.fairseq_tokens_to_ids )
        if str(__lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
            _A = len(self.fairseq_tokens_to_ids )
        # Reverse mapping id -> token.
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    # Pickle support: drop the C sp_model object, keep its serialized proto.
    def __getstate__( self : Any ) -> List[Any]:
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state
    # Pickle support: rebuild the sentencepiece processor from the proto.
    def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ) -> List[Any]:
        _A = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _A = {}
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    # Apparently build_inputs_with_special_tokens: <s> A </s> (</s> B </s>).
    def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _A = [self.cls_token_id]
        _A = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # Apparently get_special_tokens_mask: 1 marks special tokens, 0 sequence tokens.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
    # Apparently create_token_type_ids_from_sequences: all zeros for this model.
    def snake_case_ ( self : Any , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # vocab_size: size of the reduced (fairseq) vocabulary.
    @property
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        return len(self.fairseq_ids_to_tokens )
    # get_vocab: token -> id mapping including user-added tokens.
    def snake_case_ ( self : Dict ) -> Optional[Any]:
        _A = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # _tokenize via sentencepiece.  NOTE(review): ``out_type=__lowerCAmelCase``
    # looks like a mangled ``out_type=str`` — as written it passes the input
    # text as the output type.  TODO confirm against upstream.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : str ) -> List[str]:
        return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
    # _convert_token_to_id with unk fallback for out-of-vocab tokens.
    def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    # _convert_id_to_token via the reverse fairseq mapping.
    def snake_case_ ( self : int , __lowerCAmelCase : Optional[int] ) -> List[str]:
        return self.fairseq_ids_to_tokens[index]
    # convert_tokens_to_string.  NOTE(review): ``.replace(__lowerCAmelCase , ' ')``
    # looks like a mangled ``.replace(SPIECE_UNDERLINE, ' ')`` ("▁" -> space);
    # as written it would try to replace the token list itself.  TODO restore.
    def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        _A = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
        return out_string
    # save_vocabulary: copy (or re-serialize) the sp model and dict.txt into
    # ``save_directory`` and return the two written paths.
    def snake_case_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _A = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__lowerCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 2 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os
    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the assignment below looks like a mangled
    # os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" — as written the
    # allocator setting is never actually applied.  TODO confirm upstream.
    UpperCAmelCase_ = """platform"""
    import jax
    import jax.numpy as jnp
    import numpy as np
    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase__ :
    """Helper that builds tiny Pegasus configs/inputs for the Flax tests.

    NOTE(review): machine-mangled block.  Every ``_A = ...`` in ``__init__``
    was originally ``self.<name> = ...`` (the methods below read
    ``self.batch_size`` etc., which are never set as written), and the
    duplicated ``__lowerCAmelCase`` parameters are a SyntaxError.  Code left
    byte-identical here.
    """
    a__ : str = PegasusConfig
    a__ : str = {}
    a__ : int = "gelu"
    def __init__( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=False , __lowerCAmelCase : Union[str, Any]=99 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=37 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int=20 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : Dict=0 , ) -> List[str]:
        _A = parent
        _A = batch_size
        _A = seq_length
        _A = is_training
        _A = use_labels
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = eos_token_id
        _A = pad_token_id
        _A = bos_token_id
    # Apparently prepare_config_and_inputs_for_common: random ids with a
    # trailing EOS column, plus a matching tiny config.
    def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
        _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        _A = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        _A = np.concatenate([input_ids, eos_tensor] , axis=1 )
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _A = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        return config, inputs_dict
    # Apparently check_use_cache_forward: incremental (cached) decoding must
    # match the one-shot decode within 1e-3 on the last position.
    def snake_case_ ( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ) -> List[Any]:
        _A = 20
        _A = model_class_name(__lowerCAmelCase )
        _A = model.encode(inputs_dict['''input_ids'''] )
        _A , _A = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _A = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
        _A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        _A = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _A = model.decode(
            decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
        _A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _A = model.decode(
            decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , )
        _A = model.decode(__lowerCAmelCase , __lowerCAmelCase )
        _A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
    # Same check, but with an explicit decoder attention mask padded out to
    # the full cache length.
    def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ) -> str:
        _A = 20
        _A = model_class_name(__lowerCAmelCase )
        _A = model.encode(inputs_dict['''input_ids'''] )
        _A , _A = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _A = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        _A = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
        _A = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _A = model.decode(
            decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
        _A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _A = model.decode(
            decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
        _A = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase )
        _A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the default attention masks for a Pegasus encoder/decoder pair.

    Returns a dict with ``input_ids``/``decoder_input_ids`` passed through and
    int8 masks that are 0 on padding positions, derived when not supplied.

    NOTE(review): restored from the mangled original — the def name matches
    the ``prepare_pegasus_inputs_dict`` call site above, the duplicated
    ``_snake_case`` parameters (a SyntaxError) are renamed to the names the
    body reads, and ``np.inta`` is corrected to ``np.int8``.
    """
    if attention_mask is None:
        # Attend everywhere except padding positions.
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder start token) is always attended;
        # the remaining positions follow the padding mask.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class lowerCamelCase__ ( _A , unittest.TestCase):
    """Flax Pegasus model test-suite (jit consistency, cache, generation).

    NOTE(review): machine-mangled block — the ``_A = ...`` assignments were
    originally real local names (the reads ``outputs``, ``diff`` etc. survive),
    and ``FlaxPegasusModelTester`` referenced below is not defined in this
    file as written (the helper class above was mangled to
    ``lowerCamelCase__``).  Code left byte-identical here.
    """
    # Model classes under test (empty tuple when flax is unavailable).
    a__ : List[Any] = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    a__ : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    a__ : Any = True
    a__ : Tuple = False
    a__ : Optional[int] = False
    a__ : Union[str, Any] = False
    # setUp equivalent: build the shared model tester and config tester.
    def snake_case_ ( self : Union[str, Any] ) -> int:
        _A = FlaxPegasusModelTester(self )
        _A = ConfigTester(self , config_class=__lowerCAmelCase )
    # Run the shared config sanity checks.
    def snake_case_ ( self : List[Any] ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    # Cached incremental decoding must match one-shot decoding.
    def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Same, with an explicit decoder attention mask.
    def snake_case_ ( self : Tuple ) -> Union[str, Any]:
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Encoding must produce identically-shaped outputs with and without jax.jit.
    def snake_case_ ( self : List[str] ) -> List[Any]:
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
                _A = model_class(__lowerCAmelCase )
                @jax.jit
                def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Any ):
                    return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
                with self.subTest('''JIT Enabled''' ):
                    _A = encode_jitted(**__lowerCAmelCase ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        _A = encode_jitted(**__lowerCAmelCase ).to_tuple()
                self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
                for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )
    # Decoding must produce identically-shaped outputs with and without jax.jit.
    def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                _A = model_class(__lowerCAmelCase )
                _A = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                _A = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(__lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
                    return model.decode(
                        decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , )
                with self.subTest('''JIT Enabled''' ):
                    _A = decode_jitted(**__lowerCAmelCase ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        _A = decode_jitted(**__lowerCAmelCase ).to_tuple()
                self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
                for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )
    # Smoke-test loading the pretrained checkpoint from PyTorch weights.
    @slow
    def snake_case_ ( self : Union[str, Any] ) -> Tuple:
        for model_class_name in self.all_model_classes:
            _A = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__lowerCAmelCase )
            _A = np.ones((1, 1) )
            _A = model(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    # End-to-end summarization against pinned reference outputs.
    @slow
    def snake_case_ ( self : List[str] ) -> str:
        _A = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
        _A = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
        _A = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]
        _A = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]
        _A = tokenizer(__lowerCAmelCase , return_tensors='''np''' , truncation=__lowerCAmelCase , max_length=5_12 , padding=__lowerCAmelCase )
        _A = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences
        _A = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
        assert tgt_text == decoded
| 2 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    Returns every vertex reachable from ``start`` in ``graph`` (an
    adjacency-list mapping ``vertex -> list of neighbours``).

    NOTE(review): parameters restored from the mangled duplicate
    ``_snake_case`` names (a SyntaxError as written) — the body already read
    ``graph``/``start``.  ``{start}`` replaces the original ``set(start)``,
    which would split a multi-character vertex name into its characters.
    """
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Example graph for the __main__ demo below: an undirected adjacency list
# mapping each vertex to its neighbours.
# NOTE(review): "D" lists itself as a neighbour — looks like a typo for
# ["B", "E"]; harmless for DFS (self-edge is skipped once explored). TODO confirm.
UpperCAmelCase_ = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    # Run the module doctests, then demo the search from vertex "A".
    import doctest

    doctest.testmod()
    # NOTE(review): the original printed ``depth_first_search(G, "A")``, but no
    # function of that name exists in this file (the def was mangled to
    # ``SCREAMING_SNAKE_CASE_``), so it raised NameError at runtime.
    print(SCREAMING_SNAKE_CASE_(G, """A"""))
| 2 | 1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.